Merge branch 'dev-v2.7' into dev-v2.7

pull/2546/head
Sakala Venkata Krishna Rohit 2023-04-19 16:17:20 -07:00 committed by GitHub
commit 8860962300
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
84 changed files with 5964 additions and 44 deletions

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,16 @@
# Chart metadata for the neuvector-crd chart (CRD-only install).
annotations:
  catalog.cattle.io/certified: rancher
  # "true" is quoted deliberately: Rancher expects a string, not a YAML boolean
  catalog.cattle.io/hidden: "true"
  catalog.cattle.io/namespace: cattle-neuvector-system
  catalog.cattle.io/release-name: neuvector-crd
apiVersion: v1
appVersion: 5.1.2
description: Helm chart for NeuVector's CRD services
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
maintainers:
  - email: support@neuvector.com
    name: becitsthere
name: neuvector-crd
type: application
version: 102.0.1+up2.4.3

View File

@ -0,0 +1,15 @@
# NeuVector Helm Chart
Helm chart for NeuVector container security's CRD services. NeuVector's CRD (Custom Resource Definition) captures and declares application security policies early in the pipeline; the defined policies can then be deployed together with the container applications.
Because the CRD policies can be deployed before NeuVector's core product, this separate helm chart is created. For backward compatibility, crd.yaml is not removed in the 'core' chart. If you use this 'crd' chart, please set `crdwebhook.enabled` to false in the 'core' chart.
## Configuration
The following table lists the configurable parameters of the NeuVector chart and their default values.
Parameter | Description | Default | Notes
--------- | ----------- | ------- | -----
`openshift` | If deploying in OpenShift, set this to true | `false` |
`serviceAccount` | Service account name for NeuVector components | `default` |
`crdwebhook.type` | crd webhook type | `ClusterIP` |

View File

@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
Uses .Values.nameOverride when set, otherwise .Chart.Name; truncated to 63
characters (DNS-1123 label limit) with any trailing "-" removed.
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Precedence: .Values.fullnameOverride, then the bare release name (when it
already contains the chart name), then "<release>-<chart>".
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
"+" is replaced with "_" because "+" is not a valid label-value character.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,11 @@
# Default values for neuvector.
# This is a YAML-formatted file.
# Declare variables to be passed into the templates.

openshift: false
serviceAccount: neuvector
crdwebhook:
  # Service type for the CRD webhook service
  type: ClusterIP
  enabled: true

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
# OS generated files
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,27 @@
# Chart metadata for the NeuVector core feature chart.
annotations:
  # Automatically install the matching neuvector-crd chart version
  catalog.cattle.io/auto-install: neuvector-crd=match
  catalog.cattle.io/certified: rancher
  catalog.cattle.io/display-name: NeuVector
  # Quoted: the range starts with ">", a YAML indicator character
  catalog.cattle.io/kube-version: '>=1.18.0-0 < 1.27.0-0'
  catalog.cattle.io/namespace: cattle-neuvector-system
  catalog.cattle.io/os: linux
  catalog.cattle.io/permit-os: linux
  catalog.cattle.io/provides-gvr: neuvector.com/v1
  catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
  catalog.cattle.io/release-name: neuvector
  catalog.cattle.io/type: cluster-tool
  catalog.cattle.io/upstream-version: 2.4.3
apiVersion: v1
appVersion: 5.1.2
description: Helm feature chart for NeuVector's core services
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
keywords:
  - security
maintainers:
  - email: support@neuvector.com
    name: becitsthere
name: neuvector
sources:
  - https://github.com/neuvector/neuvector
version: 102.0.1+up2.4.3

View File

@ -0,0 +1,194 @@
# NeuVector Helm Chart
Helm chart for NeuVector container security's core services.
## CRD
Because the CRD (Custom Resource Definition) policies can be deployed before NeuVector's core product, a new 'crd' helm chart is created. The crd template in the 'core' chart is kept for the backward compatibility. Please set `crdwebhook.enabled` to false, if you use the new 'crd' chart.
## Choosing container runtime
The NeuVector platform supports docker, cri-o and containerd as the container runtime. k3s/rke2 and bottlerocket clusters use their own runtime socket paths, so enable the corresponding runtime option, `k3s.enabled` or `bottlerocket.enabled`, respectively.
## Configuration
The following table lists the configurable parameters of the NeuVector chart and their default values.
Parameter | Description | Default | Notes
--------- | ----------- | ------- | -----
`openshift` | If deploying in OpenShift, set this to true | `false` |
`registry` | NeuVector container registry | `docker.io` |
`tag` | image tag for controller enforcer manager | `latest` |
`oem` | OEM release name | `nil` |
`imagePullSecrets` | image pull secret | `nil` |
`rbac` | NeuVector RBAC manifests are installed when rbac is enabled | `true` |
`psp` | NeuVector Pod Security Policy when psp policy is enabled | `false` |
`serviceAccount` | Service account name for NeuVector components | `default` |
`controller.enabled` | If true, create controller | `true` |
`controller.image.repository` | controller image repository | `neuvector/controller` |
`controller.image.hash` | controller image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`controller.replicas` | controller replicas | `3` |
`controller.schedulerName` | kubernetes scheduler name | `nil` |
`controller.affinity` | controller affinity rules | ... | spread controllers to different nodes |
`controller.tolerations` | List of node taints to tolerate | `nil` |
`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`controller.disruptionbudget` | controller PodDisruptionBudget. 0 to disable. Recommended value: 2. | `0` |
`controller.priorityClassName` | controller priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`controller.podLabels` | Specify the pod labels. | `{}` |
`controller.podAnnotations` | Specify the pod annotations. | `{}` |
`controller.env` | User-defined environment variables for controller. | `[]` |
`controller.ranchersso.enabled` | If true, enable Rancher single sign on | `false` | Rancher server address auto configured.|
`controller.pvc.enabled` | If true, enable persistence for controller using PVC | `false` | Require persistent volume type RWX, and storage 1Gi
`controller.pvc.accessModes` | Access modes for the created PVC. | `["ReadWriteMany"]` |
`controller.pvc.existingClaim` | If `false`, a new PVC will be created. If a string is provided, an existing PVC with this name will be used. | `false` |
`controller.pvc.storageClass` | Storage Class to be used | `default` |
`controller.pvc.capacity` | Storage capacity | `1Gi` |
`controller.azureFileShare.enabled` | If true, enable the usage of an existing or statically provisioned Azure File Share | `false` |
`controller.azureFileShare.secretName` | The name of the secret containing the Azure file share storage account name and key | `nil` |
`controller.azureFileShare.shareName` | The name of the Azure file share to use | `nil` |
`controller.apisvc.type` | Controller REST API service type | `nil` |
`controller.apisvc.annotations` | Add annotations to controller REST API service | `{}` |
`controller.apisvc.route.enabled` | If true, create a OpenShift route to expose the Controller REST API service | `false` |
`controller.apisvc.route.termination` | Specify TLS termination for OpenShift route for Controller REST API service. Possible passthrough, edge, reencrypt | `passthrough` |
`controller.apisvc.route.host` | Set controller REST API service hostname | `nil` |
`controller.apisvc.route.tls.key` | Set controller REST API service PEM format key file | `nil` |
`controller.apisvc.route.tls.certificate` | Set controller REST API service PEM format certificate file | `nil` |
`controller.apisvc.route.tls.caCertificate` | Set controller REST API service CA certificate may be required to establish a certificate chain for validation | `nil` |
`controller.apisvc.route.tls.destinationCACertificate` | Set controller REST API service CA certificate to validate the endpoint certificate | `nil` |
`controller.certificate.secret` | Replace controller REST API certificate using secret if secret name is specified | `nil` |
`controller.certificate.keyFile` | Replace controller REST API certificate key file | `tls.key` |
`controller.certificate.pemFile` | Replace controller REST API certificate pem file | `tls.pem` |
`controller.federation.mastersvc.type` | Multi-cluster primary cluster service type. If specified, the deployment will be used to manage other clusters. Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` |
`controller.federation.mastersvc.annotations` | Add annotations to Multi-cluster primary cluster REST API service | `{}` |
`controller.federation.mastersvc.route.enabled` | If true, create a OpenShift route to expose the Multi-cluster primary cluster service | `false` |
`controller.federation.mastersvc.route.host` | Set OpenShift route host for primary cluster service | `nil` |
`controller.federation.mastersvc.route.termination` | Specify TLS termination for OpenShift route for Multi-cluster primary cluster service. Possible passthrough, edge, reencrypt | `passthrough` |
`controller.federation.mastersvc.route.tls.key` | Set PEM format key file for OpenShift route for Multi-cluster primary cluster service | `nil` |
`controller.federation.mastersvc.route.tls.certificate` | Set PEM format key certificate file for OpenShift route for Multi-cluster primary cluster service | `nil` |
`controller.federation.mastersvc.route.tls.caCertificate` | Set CA certificate may be required to establish a certificate chain for validation for OpenShift route for Multi-cluster primary cluster service | `nil` |
`controller.federation.mastersvc.route.tls.destinationCACertificate` | Set CA certificate to validate the endpoint certificate for OpenShift route for Multi-cluster primary cluster service | `nil` |
`controller.federation.mastersvc.ingress.enabled` | If true, create ingress for federation master service, must also set ingress host value | `false` | enable this if ingress controller is installed
`controller.federation.mastersvc.ingress.tls` | If true, TLS is enabled for controller federation master ingress service |`false` | If set, the tls-host used is the one set with `controller.federation.mastersvc.ingress.host`.
`controller.federation.mastersvc.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`controller.federation.mastersvc.ingress.ingressClassName` | To be used instead of the ingress.class annotation if an IngressClass is provisioned | `""` |
`controller.federation.mastersvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.federation.mastersvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.federation.managedsvc.type` | Multi-cluster managed cluster service type. If specified, the deployment will be managed by the managed cluster. Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` |
`controller.federation.managedsvc.annotations` | Add annotations to Multi-cluster managed cluster REST API service | `{}` |
`controller.federation.managedsvc.route.enabled` | If true, create a OpenShift route to expose the Multi-cluster managed cluster service | `false` |
`controller.federation.managedsvc.route.host` | Set OpenShift route host for managed service | `nil` |
`controller.federation.managedsvc.route.termination` | Specify TLS termination for OpenShift route for Multi-cluster managed cluster service. Possible passthrough, edge, reencrypt | `passthrough` |
`controller.federation.managedsvc.route.tls.key` | Set PEM format key file for OpenShift route for Multi-cluster managed cluster service | `nil` |
`controller.federation.managedsvc.route.tls.certificate` | Set PEM format certificate file for OpenShift route for Multi-cluster managed cluster service | `nil` |
`controller.federation.managedsvc.route.tls.caCertificate` | Set CA certificate may be required to establish a certificate chain for validation for OpenShift route for Multi-cluster managed cluster service | `nil` |
`controller.federation.managedsvc.route.tls.destinationCACertificate` | Set CA certificate to validate the endpoint certificate for OpenShift route for Multi-cluster managed cluster service | `nil` |
`controller.federation.managedsvc.ingress.enabled` | If true, create ingress for federation managed service, must also set ingress host value | `false` | enable this if ingress controller is installed
`controller.federation.managedsvc.ingress.tls` | If true, TLS is enabled for controller federation managed ingress service |`false` | If set, the tls-host used is the one set with `controller.federation.managedsvc.ingress.host`.
`controller.federation.managedsvc.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`controller.federation.managedsvc.ingress.ingressClassName` | To be used instead of the ingress.class annotation if an IngressClass is provisioned | `""` |
`controller.federation.managedsvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.federation.managedsvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
`controller.federation.managedsvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.ingress.enabled` | If true, create ingress for rest api, must also set ingress host value | `false` | enable this if ingress controller is installed
`controller.ingress.tls` | If true, TLS is enabled for controller rest api ingress service |`false` | If set, the tls-host used is the one set with `controller.ingress.host`.
`controller.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`controller.ingress.ingressClassName` | To be used instead of the ingress.class annotation if an IngressClass is provisioned | `""` |
`controller.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.configmap.enabled` | If true, configure NeuVector global settings using a ConfigMap | `false`
`controller.configmap.data` | NeuVector configuration in YAML format | `{}`
`controller.secret.enabled` | If true, configure NeuVector global settings using secrets | `false`
`controller.secret.data` | NeuVector configuration in key/value pair format | `{}`
`enforcer.enabled` | If true, create enforcer | `true` |
`enforcer.image.repository` | enforcer image repository | `neuvector/enforcer` |
`enforcer.image.hash` | enforcer image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`enforcer.updateStrategy.type` | enforcer update strategy type. | `RollingUpdate` |
`enforcer.priorityClassName` | enforcer priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`enforcer.podLabels` | Specify the pod labels. | `{}` |
`enforcer.podAnnotations` | Specify the pod annotations. | `{}` |
`enforcer.env` | User-defined environment variables for enforcers. | `[]` |
`enforcer.tolerations` | List of node taints to tolerate | `- effect: NoSchedule`<br>`key: node-role.kubernetes.io/master` | other taints can be added after the default
`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.enabled` | If true, create manager | `true` |
`manager.image.repository` | manager image repository | `neuvector/manager` |
`manager.image.hash` | manager image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`manager.priorityClassName` | manager priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`manager.podLabels` | Specify the pod labels. | `{}` |
`manager.podAnnotations` | Specify the pod annotations. | `{}` |
`manager.env.ssl` | If false, manager will listen on HTTP access instead of HTTPS | `true` |
`manager.svc.type` | set manager service type for native Kubernetes | `NodePort`;<br>if it is OpenShift platform or ingress is enabled, then default is `ClusterIP` | set to LoadBalancer if using cloud providers, such as Azure, Amazon, Google
`manager.svc.loadBalancerIP` | if manager service type is LoadBalancer, this is used to specify the load balancer's IP | `nil` |
`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.route.enabled` | If true, create a OpenShift route to expose the management console service | `true` |
`manager.route.host` | Set OpenShift route host for management console service | `nil` |
`manager.route.termination` | Specify TLS termination for OpenShift route for management console service. Possible passthrough, edge, reencrypt | `passthrough` |
`manager.route.tls.key` | Set PEM format key file for OpenShift route for management console service | `nil` |
`manager.route.tls.certificate` | Set PEM format certificate file for OpenShift route for management console service | `nil` |
`manager.route.tls.caCertificate` | Set CA certificate may be required to establish a certificate chain for validation for OpenShift route for management console service | `nil` |
`manager.route.tls.destinationCACertificate` | Set controller REST API service CA certificate to validate the endpoint certificate for OpenShift route for management console service | `nil` |
`manager.certificate.secret` | Replace manager UI certificate using secret if secret name is specified | `nil` |
`manager.certificate.keyFile` | Replace manager UI certificate key file | `tls.key` |
`manager.certificate.pemFile` | Replace manager UI certificate pem file | `tls.pem` |
`manager.ingress.enabled` | If true, create ingress, must also set ingress host value | `false` | enable this if ingress controller is installed
`manager.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`manager.ingress.ingressClassName` | To be used instead of the ingress.class annotation if an IngressClass is provisioned | `""` |
`manager.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. Currently only supports `/`
`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.ingress.tls` | If true, TLS is enabled for manager ingress service |`false` | If set, the tls-host used is the one set with `manager.ingress.host`.
`manager.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.affinity` | manager affinity rules | `{}` |
`manager.tolerations` | List of node taints to tolerate | `nil` |
`manager.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`manager.runAsUser` | Specify the run as User ID | `nil` |
`cve.updater.enabled` | If true, create cve updater | `true` |
`cve.updater.secure` | If true, API server's certificate is validated | `false` |
`cve.updater.image.repository` | cve updater image repository | `neuvector/updater` |
`cve.updater.image.tag` | image tag for cve updater | `latest` |
`cve.updater.image.hash` | cve updater image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`cve.updater.priorityClassName` | cve updater priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`cve.updater.podLabels` | Specify the pod labels. | `{}` |
`cve.updater.podAnnotations` | Specify the pod annotations. | `{}` |
`cve.updater.schedule` | cronjob cve updater schedule | `0 0 * * *` |
`cve.updater.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`cve.updater.runAsUser` | Specify the run as User ID | `nil` |
`cve.scanner.enabled` | If true, cve scanners will be deployed | `true` |
`cve.scanner.image.repository` | cve scanner image repository | `neuvector/scanner` |
`cve.scanner.image.tag` | cve scanner image tag | `latest` |
`cve.scanner.image.hash` | cve scanner image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
`cve.scanner.priorityClassName` | cve scanner priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
`cve.scanner.podLabels` | Specify the pod labels. | `{}` |
`cve.scanner.podAnnotations` | Specify the pod annotations. | `{}` |
`cve.scanner.env` | User-defined environment variables for scanner. | `[]` |
`cve.scanner.replicas` | external scanner replicas | `3` |
`cve.scanner.dockerPath` | the remote docker socket if CI/CD integration need scan images before they are pushed to the registry | `nil` |
`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml) |
`cve.scanner.affinity` | scanner affinity rules | `{}` |
`cve.scanner.tolerations` | List of node taints to tolerate | `nil` |
`cve.scanner.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`cve.scanner.runAsUser` | Specify the run as User ID | `nil` |
`docker.path` | docker path | `/var/run/docker.sock` |
`containerd.enabled` | Set to true, if the container runtime is containerd | `false` | **Note**: For k3s and rke2 clusters, set k3s.enabled to true instead
`containerd.path` | If containerd is enabled, this local containerd socket path will be used | `/var/run/containerd/containerd.sock` |
`crio.enabled` | Set to true, if the container runtime is cri-o | `false` |
`crio.path` | If cri-o is enabled, this local cri-o socket path will be used | `/var/run/crio/crio.sock` |
`k3s.enabled` | Set to true for k3s or rke2 | `false` |
`k3s.runtimePath` | If k3s is enabled, this local containerd socket path will be used | `/run/k3s/containerd/containerd.sock` |
`bottlerocket.enabled` | Set to true if using AWS bottlerocket | `false` |
`bottlerocket.runtimePath` | If bottlerocket is enabled, this local containerd socket path will be used | `/run/dockershim.sock` |
`admissionwebhook.type` | admission webhook type | `ClusterIP` |
`crdwebhook.enabled` | Enable crd service and create crd related resources | `true` |
`crdwebhook.type` | crd webhook type | `ClusterIP` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install my-release --namespace neuvector ./neuvector-helm/ --set manager.env.ssl=off
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install my-release --namespace neuvector ./neuvector-helm/ -f values.yaml
```

View File

@ -0,0 +1,35 @@
### Run-Time Protection Without Compromise
NeuVector delivers a complete run-time security solution with container process/file system protection and vulnerability scanning combined with the only true Layer 7 container firewall. Protect sensitive data with a complete container security platform.
NeuVector integrates tightly with Rancher and Kubernetes to extend the built-in security features for applications that require defense in depth. Security features include:
+ Build phase vulnerability scanning with Jenkins plug-in and registry scanning
+ Admission control to prevent vulnerable or unauthorized image deployments using Kubernetes admission control webhooks
+ Complete run-time scanning with network, process, and file system monitoring and protection
+ The industry's only layer 7 container firewall for multi-protocol threat detection and automated segmentation
+ Advanced network controls including DLP detection, service mesh integration, connection blocking and packet captures
+ Run-time vulnerability scanning and CIS benchmarks
Additional Notes:
+ Previous deployments from Rancher, such as from our Partners chart repository or the primary NeuVector Helm chart, must be completely removed in order to update to the new integrated feature chart. See https://github.com/rancher/rancher/issues/37447.
+ Configure correct container runtime and runtime path under container runtime. Enable only one runtime.
+ For deploying on hardened RKE2 and K3s clusters, enable PSP and set user id from other configuration for Manager, Scanner and Updater deployments. User id can be any number other than 0.
+ For deploying on hardened RKE cluster, enable PSP from security settings.
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
**Note:**
In this chart release, any previous field that was associated with any PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
**Note:**
If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.

View File

@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
Resolves to .Values.nameOverride when provided, else .Chart.Name, then
truncates to 63 characters (DNS-1123 label limit) and strips a trailing "-".
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Order of precedence: fullnameOverride, the release name alone (when it
already contains the chart name), otherwise "<release>-<chart>".
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
"+" is replaced by "_" since "+" is not allowed in Kubernetes label values.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@ -0,0 +1,336 @@
questions:
  # image configurations
  - variable: controller.image.repository
    default: "neuvector/controller"
    description: controller image repository
    type: string
    label: Controller Image Path
    group: "Container Images"
  - variable: controller.image.tag
    default: ""
    description: image tag for controller
    type: string
    label: Controller Image Tag
    group: "Container Images"
  - variable: manager.image.repository
    default: "neuvector/manager"
    description: manager image repository
    type: string
    label: Manager Image Path
    group: "Container Images"
  - variable: manager.image.tag
    default: ""
    description: image tag for manager
    type: string
    label: Manager Image Tag
    group: "Container Images"
  - variable: enforcer.image.repository
    default: "neuvector/enforcer"
    description: enforcer image repository
    type: string
    label: Enforcer Image Path
    group: "Container Images"
  - variable: enforcer.image.tag
    default: ""
    description: image tag for enforcer
    type: string
    label: Enforcer Image Tag
    group: "Container Images"
  - variable: cve.scanner.image.repository
    default: "neuvector/scanner"
    description: scanner image repository
    type: string
    label: Scanner Image Path
    group: "Container Images"
  - variable: cve.scanner.image.tag
    default: ""
    description: image tag for scanner
    type: string
    label: Scanner Image Tag
    group: "Container Images"
  - variable: cve.updater.image.repository
    default: "neuvector/updater"
    description: cve updater image repository
    type: string
    label: CVE Updater Image Path
    group: "Container Images"
  - variable: cve.updater.image.tag
    default: ""
    description: image tag for updater
    type: string
    label: Updater Image Tag
    group: "Container Images"
#Container Runtime configurations
# Exactly one runtime may be enabled at a time; each subquestion exposes the
# host socket path that the NeuVector pods mount.
- variable: docker.enabled
  default: true
  description: Docker runtime. Enable only one runtime
  type: boolean
  label: Docker Runtime
  show_subquestion_if: true
  group: "Container Runtime"
  subquestions:
  - variable: docker.path
    default: "/var/run/docker.sock"
    description: "Docker Runtime Path"
    type: string
    label: Runtime Path
- variable: containerd.enabled
  default: false
  description: Containerd runtime. Enable only one runtime
  type: boolean
  label: Containerd Runtime
  show_subquestion_if: true
  group: "Container Runtime"
  subquestions:
  - variable: containerd.path
    # No leading space: the value is mounted verbatim as a hostPath.
    default: "/var/run/containerd/containerd.sock"
    description: "Containerd Runtime Path"
    type: string
    label: Runtime Path
- variable: crio.enabled
  default: false
  description: CRI-O runtime. Enable only one runtime
  type: boolean
  label: CRI-O Runtime
  show_subquestion_if: true
  group: "Container Runtime"
  subquestions:
  - variable: crio.path
    default: "/var/run/crio/crio.sock"
    description: "CRI-O Runtime Path"
    type: string
    label: Runtime Path
- variable: k3s.enabled
  default: false
  description: k3s containerd runtime. Enable only one runtime. Choose this option for RKE2 and K3S based clusters
  type: boolean
  label: k3s Containerd Runtime
  show_subquestion_if: true
  group: "Container Runtime"
  subquestions:
  - variable: k3s.runtimePath
    # No leading space: the value is mounted verbatim as a hostPath.
    default: "/run/k3s/containerd/containerd.sock"
    description: "k3s Containerd Runtime Path"
    type: string
    label: Runtime Path
#storage configurations
- variable: controller.pvc.enabled
default: false
description: If true, enable persistence for controller using PVC. PVC should support ReadWriteMany(RWX)
type: boolean
label: PVC Status
group: "PVC Configuration"
- variable: controller.pvc.storageClass
default: ""
description: Storage Class to be used
type: string
label: Storage Class Name
group: "PVC Configuration"
#ingress configurations
- variable: manager.ingress.enabled
default: false
description: If true, create ingress, must also set ingress host value
type: boolean
label: Manager Ingress Status
group: "Ingress Configuration"
show_subquestion_if: true
subquestions:
- variable: manager.ingress.host
default: ""
description: Must set this host value if ingress is enabled
type: string
label: Manager Ingress Host
group: "Ingress Configuration"
- variable: manager.ingress.path
default: "/"
description: Set ingress path
type: string
label: Manager Ingress Path
group: "Ingress Configuration"
- variable: manager.ingress.annotations
default: "{}"
description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation
type: string
label: Manager Ingress Annotations
group: "Ingress Configuration"
- variable: controller.ingress.enabled
default: false
description: If true, create ingress for rest api, must also set ingress host value
type: boolean
label: Controller Ingress Status
group: "Ingress Configuration"
show_subquestion_if: true
subquestions:
- variable: controller.ingress.host
default: ""
description: Must set this host value if ingress is enabled
type: string
label: Controller Ingress Host
group: "Ingress Configuration"
- variable: controller.ingress.path
default: "/"
description: Set ingress path
type: string
label: Controller Ingress Path
group: "Ingress Configuration"
- variable: controller.ingress.annotations
default: "{}"
description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation
type: string
label: Controller Ingress Annotations
group: "Ingress Configuration"
- variable: controller.federation.mastersvc.ingress.enabled
default: false
description: If true, create ingress for rest api, must also set ingress host value
type: boolean
label: Controller Federation Master Service Ingress Status
group: "Ingress Configuration"
show_subquestion_if: true
subquestions:
- variable: controller.federation.mastersvc.ingress.tls
default: false
description: If true, TLS is enabled for controller federation master ingress service
type: boolean
label: Controller Federation Master Service Ingress TLS Status
group: "Ingress Configuration"
- variable: controller.federation.mastersvc.ingress.host
default: ""
description: Must set this host value if ingress is enabled
type: string
label: Controller Federation Master Service Ingress Host
group: "Ingress Configuration"
- variable: controller.federation.mastersvc.ingress.path
default: "/"
description: Set ingress path
type: string
label: Controller Federation Master Service Ingress Path
group: "Ingress Configuration"
- variable: controller.federation.mastersvc.ingress.ingressClassName
default: ""
description: To be used instead of the ingress.class annotation if an IngressClass is provisioned
type: string
label: Controller Federation Master Service Ingress IngressClassName
group: "Ingress Configuration"
- variable: controller.federation.mastersvc.ingress.secretName
default: ""
description: Name of the secret to be used for TLS-encryption
type: string
label: Controller Federation Master Service Ingress SecretName
group: "Ingress Configuration"
- variable: controller.federation.mastersvc.ingress.annotations
default: "{}"
description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation
type: string
label: Controller Federation Master Service Ingress Annotations
group: "Ingress Configuration"
- variable: controller.federation.managedsvc.ingress.enabled
default: false
description: If true, create ingress for rest api, must also set ingress host value
type: boolean
label: Controller Federation Managed Service Ingress Status
group: "Ingress Configuration"
show_subquestion_if: true
subquestions:
- variable: controller.federation.managedsvc.ingress.tls
default: false
description: If true, TLS is enabled for controller federation managed ingress service
type: boolean
label: Controller Federation Managed Service Ingress TLS Status
group: "Ingress Configuration"
- variable: controller.federation.managedsvc.ingress.host
default: ""
description: Must set this host value if ingress is enabled
type: string
label: Controller Federation Managed Service Ingress Host
group: "Ingress Configuration"
- variable: controller.federation.managedsvc.ingress.path
default: "/"
description: Set ingress path
type: string
label: Controller Federation Managed Service Ingress Path
group: "Ingress Configuration"
- variable: controller.federation.managedsvc.ingress.ingressClassName
default: ""
description: To be used instead of the ingress.class annotation if an IngressClass is provisioned
type: string
label: Controller Federation Managed Service Ingress IngressClassName
group: "Ingress Configuration"
- variable: controller.federation.managedsvc.ingress.secretName
default: ""
description: Name of the secret to be used for TLS-encryption
type: string
label: Controller Federation Managed Service Ingress SecretName
group: "Ingress Configuration"
- variable: controller.federation.managedsvc.ingress.annotations
default: "{}"
description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation
type: string
label: Controller Federation Managed Service Ingress Annotations
group: "Ingress Configuration"
#service configurations
- variable: manager.svc.type
default: "NodePort"
description: Set manager service type for native Kubernetes
type: enum
label: Manager Service Type
group: "Service Configuration"
options:
- "NodePort"
- "ClusterIP"
- "LoadBalancer"
- variable: controller.federation.mastersvc.type
default: ""
description: Multi-cluster master cluster service type. If specified, the deployment will be used to manage other clusters. Possible values include NodePort, LoadBalancer and ClusterIP
type: enum
label: Fed Master Service Type
group: "Service Configuration"
options:
- "NodePort"
- "ClusterIP"
- "LoadBalancer"
- variable: controller.federation.managedsvc.type
default: ""
description: Multi-cluster managed cluster service type. If specified, the deployment will be managed by the master cluster. Possible values include NodePort, LoadBalancer and ClusterIP
type: enum
label: Fed Managed Service Type
group: "Service Configuration"
options:
- "NodePort"
- "ClusterIP"
- "LoadBalancer"
- variable: controller.apisvc.type
default: "NodePort"
description: Controller REST API service type
type: enum
label: Controller REST API Service Type
group: "Service Configuration"
options:
- "NodePort"
- "ClusterIP"
- "LoadBalancer"
#Security Settings
- variable: global.cattle.psp.enabled
  # Single default key (the original declared `default` twice, which is
  # invalid YAML and silently last-wins on most parsers).
  default: false
  description: "Flag to enable or disable the installation of PodSecurityPolicies by this chart in the target cluster. If the cluster is running Kubernetes 1.25+, you must update this value to false."
  label: "Enable PodSecurityPolicies"
  type: boolean
  group: "Security Settings"
- variable: manager.runAsUser
  # NOTE(review): empty-string default on an int question mirrors the other
  # runAsUser entries — presumably treated as "unset" by Rancher; confirm.
  default: ""
  description: Specify the run as User ID
  type: int
  label: Manager runAsUser ID
  group: "Security Settings"
- variable: cve.scanner.runAsUser
  default: ""
  description: Specify the run as User ID
  type: int
  label: Scanner runAsUser ID
  group: "Security Settings"
- variable: cve.updater.runAsUser
  default: ""
  description: Specify the run as User ID
  type: int
  label: Updater runAsUser ID
  group: "Security Settings"

View File

@ -0,0 +1,20 @@
{{- if and .Values.manager.enabled .Values.manager.ingress.enabled }}
From outside the cluster, the NeuVector URL is:
http://{{ .Values.manager.ingress.host }}
{{- else if not .Values.openshift }}
Get the NeuVector URL by running these commands:
{{- if contains "NodePort" .Values.manager.svc.type }}
NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services neuvector-service-webui)
NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo https://$NODE_IP:$NODE_PORT
{{- else if contains "ClusterIP" .Values.manager.svc.type }}
CLUSTER_IP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.clusterIP}" services neuvector-service-webui)
echo https://$CLUSTER_IP:8443
{{- else if contains "LoadBalancer" .Values.manager.svc.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w neuvector-service-webui'
SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} neuvector-service-webui -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
echo https://$SERVICE_IP:8443
{{- end }}
{{- end }}

View File

@ -0,0 +1,40 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "neuvector.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "neuvector.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "neuvector.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Image-reference prefix: Rancher's system default registry with a trailing
slash when .Values.global.cattle.systemDefaultRegistry is set, otherwise
an empty string (so image references fall back to their repository as-is).
*/}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,18 @@
# Service fronting the controller pods for the Kubernetes admission webhook:
# the API server connects on port 443, which is forwarded to the controller's
# webhook listener on 20443.
apiVersion: v1
kind: Service
metadata:
  name: neuvector-svc-admission-webhook
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  ports:
  - port: 443
    targetPort: 20443
    protocol: TCP
    name: admission-webhook
  type: {{ .Values.admissionwebhook.type }}
  selector:
    app: neuvector-controller-pod

View File

@ -0,0 +1,33 @@
{{- if .Values.internal.certmanager.enabled }}
# Self-signed Issuer plus a 2-year CA Certificate for NeuVector's internal
# TLS, rendered only when cert-manager integration is enabled. The Issuer,
# Certificate, and resulting Secret all share the configured secret name.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: {{ .Values.internal.certmanager.secretname }}
  namespace: {{ .Release.Namespace }}
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ .Values.internal.certmanager.secretname }}
  namespace: {{ .Release.Namespace }}
spec:
  duration: 17520h # 2 years
  subject:
    organizations:
    - NeuVector
  isCA: true
  commonName: neuvector.internal
  dnsNames:
  - neuvector.internal
  # NOTE(review): "NeuVector" is not a lowercase RFC 1123 DNS name — confirm
  # cert-manager accepts it here and that it is intentional.
  - NeuVector
  secretName: {{ .Values.internal.certmanager.secretname }}
  usages:
  - digital signature
  - key encipherment
  issuerRef:
    group: cert-manager.io
    kind: Issuer
    name: {{ .Values.internal.certmanager.secretname }}
{{- end }}

View File

@ -0,0 +1,121 @@
{{- if .Values.rbac -}}
{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRole
metadata:
name: neuvector-binding-app
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- ""
resources:
- nodes
- pods
- services
- namespaces
verbs:
- get
- list
- watch
- update
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRole
metadata:
name: neuvector-binding-rbac
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
{{- if .Values.openshift }}
- apiGroups:
- image.openshift.io
resources:
- imagestreams
verbs:
- get
- list
- watch
{{- end }}
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
- clusterrolebindings
- clusterroles
verbs:
- get
- list
- watch
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRole
metadata:
name: neuvector-binding-admission
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- get
- list
- watch
- create
- update
- delete
---
{{- if $oc4 }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: neuvector-binding-co
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- config.openshift.io
resources:
- clusteroperators
verbs:
- get
- list
{{- end }}
{{- end }}

View File

@ -0,0 +1,147 @@
{{- if .Values.rbac -}}
{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-app
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: neuvector-binding-app
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-rbac
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: neuvector-binding-rbac
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-admission
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: neuvector-binding-admission
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-view
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- end }}
name: view
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc4 }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: neuvector-binding-co
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: neuvector-binding-co
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,235 @@
{{- if .Values.controller.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Deployment
metadata:
name: neuvector-controller-pod
namespace: {{ .Release.Namespace }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.controller.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.controller.replicas }}
minReadySeconds: 60
strategy:
{{ toYaml .Values.controller.strategy | indent 4 }}
selector:
matchLabels:
app: neuvector-controller-pod
template:
metadata:
labels:
app: neuvector-controller-pod
release: {{ .Release.Name }}
{{- with .Values.controller.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.controller.secret.enabled .Values.controller.configmap.enabled .Values.controller.podAnnotations }}
annotations:
{{- if .Values.controller.secret.enabled }}
checksum/init-secret: {{ include (print $.Template.BasePath "/init-secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.controller.configmap.enabled }}
checksum/init-configmap: {{ include (print $.Template.BasePath "/init-configmap.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.controller.podAnnotations }}
{{- toYaml .Values.controller.podAnnotations | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.controller.affinity }}
affinity:
{{ toYaml .Values.controller.affinity | indent 8 }}
{{- end }}
{{- if .Values.controller.tolerations }}
tolerations:
{{ toYaml .Values.controller.tolerations | indent 8 }}
{{- end }}
{{- if .Values.controller.nodeSelector }}
nodeSelector:
{{ toYaml .Values.controller.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.controller.schedulerName }}
schedulerName: {{ .Values.controller.schedulerName }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.imagePullSecrets }}
{{- end }}
{{- if .Values.controller.priorityClassName }}
priorityClassName: {{ .Values.controller.priorityClassName }}
{{- end }}
serviceAccountName: {{ .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
containers:
- name: neuvector-controller-pod
image: {{ template "system_default_registry" . }}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}
securityContext:
privileged: true
resources:
{{- if .Values.controller.resources }}
{{ toYaml .Values.controller.resources | indent 12 }}
{{- else }}
{{ toYaml .Values.resources | indent 12 }}
{{- end }}
readinessProbe:
exec:
command:
- cat
- /tmp/ready
initialDelaySeconds: 5
periodSeconds: 5
env:
- name: CLUSTER_JOIN_ADDR
value: neuvector-svc-controller.{{ .Release.Namespace }}
- name: CLUSTER_ADVERTISED_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CLUSTER_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
{{- if .Values.controller.ranchersso.enabled }}
- name: RANCHER_SSO
value: "1"
- name: RANCHER_EP
value: "{{ .Values.global.cattle.url }}"
{{- end }}
{{- if or .Values.controller.pvc.enabled .Values.controller.azureFileShare.enabled }}
- name: CTRL_PERSIST_CONFIG
value: "1"
{{- end }}
{{- with .Values.controller.env }}
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /var/neuvector
name: nv-share
readOnly: false
{{- if .Values.containerd.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.k3s.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.bottlerocket.enabled }}
- mountPath: /var/run/containerd/containerd.sock
{{- else if .Values.crio.enabled }}
- mountPath: /var/run/crio/crio.sock
{{- else }}
- mountPath: /var/run/docker.sock
{{- end }}
name: runtime-sock
readOnly: true
- mountPath: /host/proc
name: proc-vol
readOnly: true
- mountPath: /host/cgroup
name: cgroup-vol
readOnly: true
- mountPath: /etc/config
name: config-volume
readOnly: true
{{- if .Values.controller.certificate.secret }}
- mountPath: /etc/neuvector/certs/ssl-cert.key
subPath: {{ .Values.controller.certificate.keyFile }}
name: cert
readOnly: true
- mountPath: /etc/neuvector/certs/ssl-cert.pem
subPath: {{ .Values.controller.certificate.pemFile }}
name: cert
readOnly: true
{{- end }}
{{- if .Values.internal.certmanager.enabled }}
- mountPath: /etc/neuvector/certs/internal/cert.key
subPath: {{ .Values.controller.internal.certificate.keyFile }}
name: internal-cert
readOnly: true
- mountPath: /etc/neuvector/certs/internal/cert.pem
subPath: {{ .Values.controller.internal.certificate.pemFile }}
name: internal-cert
readOnly: true
- mountPath: /etc/neuvector/certs/internal/ca.cert
subPath: {{ .Values.controller.internal.certificate.caFile }}
name: internal-cert
readOnly: true
{{- end }}
terminationGracePeriodSeconds: 300
restartPolicy: Always
volumes:
- name: nv-share
{{- if .Values.controller.pvc.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.controller.pvc.existingClaim | default "neuvector-data" }}
{{- else if .Values.controller.azureFileShare.enabled }}
azureFile:
secretName: {{ .Values.controller.azureFileShare.secretName }}
shareName: {{ .Values.controller.azureFileShare.shareName }}
readOnly: false
{{- else }}
hostPath:
path: /var/neuvector
{{- end }}
- name: runtime-sock
hostPath:
{{- if .Values.containerd.enabled }}
path: {{ .Values.containerd.path }}
{{- else if .Values.crio.enabled }}
path: {{ .Values.crio.path }}
{{- else if .Values.k3s.enabled }}
path: {{ .Values.k3s.runtimePath }}
{{- else if .Values.bottlerocket.enabled }}
path: {{ .Values.bottlerocket.runtimePath }}
{{- else }}
path: {{ .Values.docker.path }}
{{- end }}
- name: proc-vol
hostPath:
path: /proc
- name: cgroup-vol
hostPath:
path: /sys/fs/cgroup
- name: config-volume
projected:
sources:
- configMap:
name: neuvector-init
optional: true
- secret:
name: neuvector-init
optional: true
{{- if .Values.controller.certificate.secret }}
- name: cert
secret:
secretName: {{ .Values.controller.certificate.secret }}
{{- end }}
{{- if .Values.internal.certmanager.enabled }}
- name: internal-cert
secret:
secretName: {{ .Values.controller.internal.certificate.secret }}
{{- end }}
{{- if gt (int .Values.controller.disruptionbudget) 0 }}
---
{{- if (semverCompare ">=1.21-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: neuvector-controller-pdb
namespace: {{ .Release.Namespace }}
spec:
minAvailable: {{ .Values.controller.disruptionbudget }}
selector:
matchLabels:
app: neuvector-controller-pod
{{- end }}
{{- end }}

View File

@ -0,0 +1,219 @@
{{- if .Values.controller.enabled }}
{{- if .Values.controller.ingress.enabled }}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: neuvector-restapi-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.ingress.ingressClassName }}
ingressClassName: {{ .Values.controller.ingress.ingressClassName | quote }}
{{ end }}
{{- if .Values.controller.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.ingress.host }}
{{- if .Values.controller.ingress.secretName }}
secretName: {{ .Values.controller.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.ingress.host }}
http:
paths:
- path: {{ .Values.controller.ingress.path }}
pathType: Prefix
backend:
service:
name: neuvector-svc-controller-api
port:
number: 10443
{{- else }}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: neuvector-restapi-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.ingress.host }}
{{- if .Values.controller.ingress.secretName }}
secretName: {{ .Values.controller.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.ingress.host }}
http:
paths:
- path: {{ .Values.controller.ingress.path }}
backend:
serviceName: neuvector-svc-controller-api
servicePort: 10443
{{- end }}
{{- end }}
{{- if .Values.controller.federation.mastersvc.ingress.enabled }}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: neuvector-mastersvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.mastersvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.mastersvc.ingress.ingressClassName }}
ingressClassName: {{ .Values.controller.federation.mastersvc.ingress.ingressClassName | quote }}
{{ end }}
{{- if .Values.controller.federation.mastersvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.mastersvc.ingress.host }}
{{- if .Values.controller.federation.mastersvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.mastersvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.mastersvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.mastersvc.ingress.path }}
pathType: Prefix
backend:
service:
name: neuvector-svc-controller-fed-master
port:
number: 11443
{{- else }}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: neuvector-mastersvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.mastersvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.mastersvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.mastersvc.ingress.host }}
{{- if .Values.controller.federation.mastersvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.mastersvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.mastersvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.mastersvc.ingress.path }}
backend:
serviceName: neuvector-svc-controller-fed-master
servicePort: 11443
{{- end }}
{{- end }}
{{- if .Values.controller.federation.managedsvc.ingress.enabled }}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: neuvector-managedsvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.managedsvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.managedsvc.ingress.ingressClassName }}
ingressClassName: {{ .Values.controller.federation.managedsvc.ingress.ingressClassName | quote }}
{{ end }}
{{- if .Values.controller.federation.managedsvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.managedsvc.ingress.host }}
{{- if .Values.controller.federation.managedsvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.managedsvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.managedsvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.managedsvc.ingress.path }}
pathType: Prefix
backend:
service:
name: neuvector-svc-controller-fed-managed
port:
number: 10443
{{- else }}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: neuvector-managedsvc-ingress
namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.managedsvc.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
chart: {{ template "neuvector.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.managedsvc.ingress.tls }}
tls:
- hosts:
- {{ .Values.controller.federation.managedsvc.ingress.host }}
{{- if .Values.controller.federation.managedsvc.ingress.secretName }}
secretName: {{ .Values.controller.federation.managedsvc.ingress.secretName }}
{{- end }}
{{- end }}
rules:
- host: {{ .Values.controller.federation.managedsvc.ingress.host }}
http:
paths:
- path: {{ .Values.controller.federation.managedsvc.ingress.path }}
backend:
serviceName: neuvector-svc-controller-fed-managed
servicePort: 10443
{{- end }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,98 @@
# OpenShift Route objects exposing NeuVector services outside the cluster.
# Rendered only when .Values.openshift is true; each Route is additionally
# gated by its own *.route.enabled flag.
{{- if .Values.openshift -}}
{{- if .Values.controller.apisvc.route.enabled }}
# Route for the controller REST API (HTTPS only).
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
# Legacy API group used by OpenShift 3.x (Kubernetes < 1.9).
apiVersion: v1
{{- end }}
kind: Route
metadata:
  name: neuvector-route-api
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.apisvc.route.host }}
  host: {{ .Values.controller.apisvc.route.host }}
{{- end }}
  to:
    kind: Service
    name: neuvector-svc-controller-api
  port:
    targetPort: controller-api
  tls:
    termination: {{ .Values.controller.apisvc.route.termination }}
{{- if or (eq .Values.controller.apisvc.route.termination "reencrypt") (eq .Values.controller.apisvc.route.termination "edge") }}
{{- with .Values.controller.apisvc.route.tls }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
---
{{ end -}}
{{- if .Values.controller.federation.mastersvc.route.enabled }}
# Route for the federation master service (multi-cluster management).
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: Route
metadata:
  name: neuvector-route-fed-master
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.mastersvc.route.host }}
  host: {{ .Values.controller.federation.mastersvc.route.host }}
{{- end }}
  to:
    kind: Service
    name: neuvector-svc-controller-fed-master
  port:
    targetPort: fed
  tls:
    termination: {{ .Values.controller.federation.mastersvc.route.termination }}
{{- if or (eq .Values.controller.federation.mastersvc.route.termination "reencrypt") (eq .Values.controller.federation.mastersvc.route.termination "edge") }}
{{- with .Values.controller.federation.mastersvc.route.tls }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
---
{{ end -}}
{{- if .Values.controller.federation.managedsvc.route.enabled }}
# Route for the federation managed service (cluster joined to a master).
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: Route
metadata:
  name: neuvector-route-fed-managed
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
{{- if .Values.controller.federation.managedsvc.route.host }}
  host: {{ .Values.controller.federation.managedsvc.route.host }}
{{- end }}
  to:
    kind: Service
    name: neuvector-svc-controller-fed-managed
  port:
    targetPort: fed
  tls:
    termination: {{ .Values.controller.federation.managedsvc.route.termination }}
{{- if or (eq .Values.controller.federation.managedsvc.route.termination "reencrypt") (eq .Values.controller.federation.managedsvc.route.termination "edge") }}
{{- with .Values.controller.federation.managedsvc.route.tls }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
{{ end -}}
{{- end -}}

View File

@ -0,0 +1,97 @@
# Services for the NeuVector controller:
#   - a headless Service for controller cluster gossip (ports 18300/18301),
#   - optional REST API, federation-master and federation-managed Services,
#     each rendered only when its *.type value is set.
{{- if .Values.controller.enabled -}}
apiVersion: v1
kind: Service
metadata:
  name: neuvector-svc-controller
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  # Headless service: controllers discover each other by pod IP.
  clusterIP: None
  ports:
  - port: 18300
    protocol: "TCP"
    name: "cluster-tcp-18300"
  - port: 18301
    protocol: "TCP"
    name: "cluster-tcp-18301"
  - port: 18301
    protocol: "UDP"
    name: "cluster-udp-18301"
  selector:
    app: neuvector-controller-pod
{{- if .Values.controller.apisvc.type }}
---
apiVersion: v1
kind: Service
metadata:
  name: neuvector-svc-controller-api
  namespace: {{ .Release.Namespace }}
{{- with .Values.controller.apisvc.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.controller.apisvc.type }}
  ports:
  - port: 10443
    protocol: "TCP"
    name: "controller-api"
  selector:
    app: neuvector-controller-pod
{{ end -}}
{{- if .Values.controller.federation.mastersvc.type }}
---
apiVersion: v1
kind: Service
metadata:
  name: neuvector-svc-controller-fed-master
  namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.mastersvc.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.controller.federation.mastersvc.type }}
  ports:
  - port: 11443
    name: fed
    protocol: TCP
  selector:
    app: neuvector-controller-pod
{{ end -}}
{{- if .Values.controller.federation.managedsvc.type }}
---
apiVersion: v1
kind: Service
metadata:
  name: neuvector-svc-controller-fed-managed
  namespace: {{ .Release.Namespace }}
{{- with .Values.controller.federation.managedsvc.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.controller.federation.managedsvc.type }}
  ports:
  - port: 10443
    name: fed
    protocol: TCP
  selector:
    app: neuvector-controller-pod
{{ end -}}
{{- end -}}

View File

@ -0,0 +1,139 @@
# DaemonSet running the NeuVector enforcer on every node.
# The enforcer needs privileged access, host PID namespace and the node's
# container-runtime socket to inspect and control workloads.
{{- if .Values.enforcer.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: DaemonSet
metadata:
  name: neuvector-enforcer-pod
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  updateStrategy: {{- toYaml .Values.enforcer.updateStrategy | nindent 4 }}
  selector:
    matchLabels:
      app: neuvector-enforcer-pod
  template:
    metadata:
      labels:
        app: neuvector-enforcer-pod
        release: {{ .Release.Name }}
      {{- with .Values.enforcer.podLabels }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.enforcer.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
      - name: {{ .Values.imagePullSecrets }}
      {{- end }}
      {{- if .Values.enforcer.tolerations }}
      tolerations:
{{ toYaml .Values.enforcer.tolerations | indent 8 }}
      {{- end }}
      # Host PID namespace is required to observe processes of other containers.
      hostPID: true
      {{- if .Values.enforcer.priorityClassName }}
      priorityClassName: {{ .Values.enforcer.priorityClassName }}
      {{- end }}
      serviceAccountName: {{ .Values.serviceAccount }}
      serviceAccount: {{ .Values.serviceAccount }}
      containers:
        - name: neuvector-enforcer-pod
          image: {{ template "system_default_registry" . }}{{ .Values.enforcer.image.repository }}:{{ .Values.enforcer.image.tag }}
          securityContext:
            privileged: true
          resources:
          {{- if .Values.enforcer.resources }}
{{ toYaml .Values.enforcer.resources | indent 12 }}
          {{- else }}
{{ toYaml .Values.resources | indent 12 }}
          {{- end }}
          env:
            # Join the controller cluster through the headless controller service.
            - name: CLUSTER_JOIN_ADDR
              value: neuvector-svc-controller.{{ .Release.Namespace }}
            - name: CLUSTER_ADVERTISED_ADDR
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: CLUSTER_BIND_ADDR
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            {{- with .Values.enforcer.env }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          volumeMounts:
            # Mount point depends on which container runtime is selected in values.
            {{- if .Values.containerd.enabled }}
            - mountPath: /var/run/containerd/containerd.sock
            {{- else if .Values.k3s.enabled }}
            - mountPath: /var/run/containerd/containerd.sock
            {{- else if .Values.bottlerocket.enabled }}
            - mountPath: /var/run/containerd/containerd.sock
            {{- else if .Values.crio.enabled }}
            - mountPath: /var/run/crio/crio.sock
            {{- else }}
            - mountPath: /var/run/docker.sock
            {{- end }}
              name: runtime-sock
              readOnly: true
            - mountPath: /host/proc
              name: proc-vol
              readOnly: true
            - mountPath: /host/cgroup
              name: cgroup-vol
              readOnly: true
            - mountPath: /lib/modules
              name: modules-vol
              readOnly: true
            {{- if .Values.internal.certmanager.enabled }}
            - mountPath: /etc/neuvector/certs/internal/cert.key
              subPath: {{ .Values.enforcer.internal.certificate.keyFile }}
              name: internal-cert
              readOnly: true
            - mountPath: /etc/neuvector/certs/internal/cert.pem
              subPath: {{ .Values.enforcer.internal.certificate.pemFile }}
              name: internal-cert
              readOnly: true
            - mountPath: /etc/neuvector/certs/internal/ca.cert
              subPath: {{ .Values.enforcer.internal.certificate.caFile }}
              name: internal-cert
              readOnly: true
            {{- end }}
      # Long grace period so the enforcer can detach cleanly from workloads.
      terminationGracePeriodSeconds: 1200
      restartPolicy: Always
      volumes:
        - name: runtime-sock
          hostPath:
          {{- if .Values.containerd.enabled }}
            path: {{ .Values.containerd.path }}
          {{- else if .Values.crio.enabled }}
            path: {{ .Values.crio.path }}
          {{- else if .Values.k3s.enabled }}
            path: {{ .Values.k3s.runtimePath }}
          {{- else if .Values.bottlerocket.enabled }}
            path: {{ .Values.bottlerocket.runtimePath }}
          {{- else }}
            path: {{ .Values.docker.path }}
          {{- end }}
        - name: proc-vol
          hostPath:
            path: /proc
        - name: cgroup-vol
          hostPath:
            path: /sys/fs/cgroup
        - name: modules-vol
          hostPath:
            path: /lib/modules
        {{- if .Values.internal.certmanager.enabled }}
        - name: internal-cert
          secret:
            secretName: {{ .Values.enforcer.internal.certificate.secret }}
        {{- end }}
{{- end }}

View File

@ -0,0 +1,13 @@
# ConfigMap with initial controller configuration files
# (eulainitcfg.yaml, ldapinitcfg.yaml, userinitcfg.yaml, ...)
# taken verbatim from .Values.controller.configmap.data.
{{- if .Values.controller.configmap.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: neuvector-init
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
data:
{{ toYaml .Values.controller.configmap.data | indent 4 }}
{{- end }}

View File

@ -0,0 +1,15 @@
# Secret with initial controller configuration; entries here take precedence
# over the neuvector-init ConfigMap. Each value from
# .Values.controller.secret.data is serialized to YAML and base64-encoded,
# as required by the Secret "data" field.
{{- if .Values.controller.secret.enabled }}
apiVersion: v1
kind: Secret
metadata:
  name: neuvector-init
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
data:
{{- range $key, $val := .Values.controller.secret.data }}
  {{ $key }}: | {{ toYaml $val | b64enc | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,92 @@
# Deployment for the NeuVector web manager (UI/REST frontend), single replica.
{{- if .Values.manager.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Deployment
metadata:
  name: neuvector-manager-pod
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: neuvector-manager-pod
  template:
    metadata:
      labels:
        app: neuvector-manager-pod
        release: {{ .Release.Name }}
      {{- with .Values.manager.podLabels }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.manager.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      {{- if .Values.manager.affinity }}
      affinity:
{{ toYaml .Values.manager.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.manager.tolerations }}
      tolerations:
{{ toYaml .Values.manager.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.manager.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.manager.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
      - name: {{ .Values.imagePullSecrets }}
      {{- end }}
      {{- if .Values.manager.priorityClassName }}
      priorityClassName: {{ .Values.manager.priorityClassName }}
      {{- end }}
      serviceAccountName: {{ .Values.serviceAccount }}
      serviceAccount: {{ .Values.serviceAccount }}
      {{- if .Values.manager.runAsUser }}
      securityContext:
        runAsUser: {{ .Values.manager.runAsUser }}
      {{- end }}
      containers:
        - name: neuvector-manager-pod
          image: {{ template "system_default_registry" . }}{{ .Values.manager.image.repository }}:{{ .Values.manager.image.tag }}
          env:
            # The manager talks to the controller through its headless service.
            - name: CTRL_SERVER_IP
              value: neuvector-svc-controller.{{ .Release.Namespace }}
            {{- if not .Values.manager.env.ssl }}
            # Disable HTTPS on the manager (needed e.g. for edge-terminated routes).
            - name: MANAGER_SSL
              value: "off"
            {{- end }}
          volumeMounts:
            {{- if .Values.manager.certificate.secret }}
            - mountPath: /etc/neuvector/certs/ssl-cert.key
              subPath: {{ .Values.manager.certificate.keyFile }}
              name: cert
              readOnly: true
            - mountPath: /etc/neuvector/certs/ssl-cert.pem
              subPath: {{ .Values.manager.certificate.pemFile }}
              name: cert
              readOnly: true
            {{- end }}
          resources:
          {{- if .Values.manager.resources }}
{{ toYaml .Values.manager.resources | indent 12 }}
          {{- else }}
{{ toYaml .Values.resources | indent 12 }}
          {{- end }}
      restartPolicy: Always
      volumes:
        {{- if .Values.manager.certificate.secret }}
        - name: cert
          secret:
            secretName: {{ .Values.manager.certificate.secret }}
        {{- end }}
{{- end }}

View File

@ -0,0 +1,71 @@
# Ingress for the manager web UI (port 8443).
# Uses networking.k8s.io/v1 on Kubernetes >= 1.19 and falls back to the
# deprecated extensions/v1beta1 schema on older clusters.
{{- if and .Values.manager.enabled .Values.manager.ingress.enabled -}}
{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: neuvector-webui-ingress
  namespace: {{ .Release.Namespace }}
{{- with .Values.manager.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
{{- if .Values.manager.ingress.ingressClassName }}
  ingressClassName: {{ .Values.manager.ingress.ingressClassName | quote }}
{{ end }}
{{- if .Values.manager.ingress.tls }}
  tls:
  - hosts:
    - {{ .Values.manager.ingress.host }}
{{- if .Values.manager.ingress.secretName }}
    secretName: {{ .Values.manager.ingress.secretName }}
{{- end }}
{{- end }}
  rules:
  - host: {{ .Values.manager.ingress.host }}
    http:
      paths:
      - path: {{ .Values.manager.ingress.path }}
        pathType: Prefix
        backend:
          service:
            name: neuvector-service-webui
            port:
              number: 8443
{{- else }}
# Legacy Ingress schema (no pathType / ingressClassName support).
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: neuvector-webui-ingress
  namespace: {{ .Release.Namespace }}
{{- with .Values.manager.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
{{- if .Values.manager.ingress.tls }}
  tls:
  - hosts:
    - {{ .Values.manager.ingress.host }}
{{- if .Values.manager.ingress.secretName }}
    secretName: {{ .Values.manager.ingress.secretName }}
{{- end }}
{{- end }}
  rules:
  - host: {{ .Values.manager.ingress.host }}
    http:
      paths:
      - path: {{ .Values.manager.ingress.path }}
        backend:
          serviceName: neuvector-service-webui
          servicePort: 8443
{{- end }}
{{- end -}}

View File

@ -0,0 +1,33 @@
# OpenShift Route exposing the manager web UI (neuvector-service-webui).
# Rendered only on OpenShift when manager.route.enabled is true.
{{- if .Values.openshift -}}
{{- if .Values.manager.route.enabled }}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: route.openshift.io/v1
{{- else }}
# Legacy API group used by OpenShift 3.x (Kubernetes < 1.9).
apiVersion: v1
{{- end }}
kind: Route
metadata:
  name: neuvector-route-webui
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
{{- if .Values.manager.route.host }}
  host: {{ .Values.manager.route.host }}
{{- end }}
  to:
    kind: Service
    name: neuvector-service-webui
  port:
    targetPort: manager
  tls:
    termination: {{ .Values.manager.route.termination }}
{{- if or (eq .Values.manager.route.termination "reencrypt") (eq .Values.manager.route.termination "edge") }}
{{- with .Values.manager.route.tls }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,26 @@
# Service fronting the manager web UI on port 8443.
{{- if .Values.manager.enabled -}}
apiVersion: v1
kind: Service
metadata:
  name: neuvector-service-webui
  namespace: {{ .Release.Namespace }}
  {{- with .Values.manager.svc.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.manager.svc.type }}
  # loadBalancerIP only applies to LoadBalancer-type services.
  {{- if and .Values.manager.svc.loadBalancerIP (eq .Values.manager.svc.type "LoadBalancer") }}
  loadBalancerIP: {{ .Values.manager.svc.loadBalancerIP }}
  {{- end }}
  ports:
    - port: 8443
      name: manager
      protocol: TCP
  selector:
    app: neuvector-manager-pod
{{- end }}

View File

@ -0,0 +1,77 @@
# PodSecurityPolicy (plus Role/RoleBinding granting "use" of it) allowing
# the privileged access NeuVector components need. Rendered only when
# global.cattle.psp.enabled is true (PSPs were removed in Kubernetes 1.25).
{{- if .Values.global.cattle.psp.enabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: neuvector-binding-psp
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    chart: {{ template "neuvector.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  privileged: true
  readOnlyRootFilesystem: false
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - SYS_ADMIN
  - NET_ADMIN
  - SYS_PTRACE
  - IPC_LOCK
  requiredDropCapabilities:
  - ALL
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: neuvector-binding-psp
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
rules:
- apiGroups:
  - policy
  - extensions
  resources:
  - podsecuritypolicies
  verbs:
  - use
  resourceNames:
  - neuvector-binding-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: neuvector-binding-psp
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: neuvector-binding-psp
subjects:
- kind: ServiceAccount
  name: {{ .Values.serviceAccount }}
  namespace: {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,27 @@
# PersistentVolumeClaim backing controller data, created only when PVC use is
# enabled and the user did not supply an existing claim.
{{- if not .Values.controller.pvc.existingClaim -}}
{{- if and .Values.controller.enabled .Values.controller.pvc.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: neuvector-data
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  accessModes:
{{ toYaml .Values.controller.pvc.accessModes | indent 4 }}
  volumeMode: Filesystem
  {{- if .Values.controller.pvc.storageClass }}
  storageClassName: {{ .Values.controller.pvc.storageClass }}
  {{- end }}
  resources:
    requests:
    {{- if .Values.controller.pvc.capacity }}
      storage: {{ .Values.controller.pvc.capacity }}
    {{- else }}
      # Default capacity when none is configured.
      storage: 1Gi
    {{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,56 @@
# RoleBindings granting the NeuVector service account the built-in "admin"
# role in the release namespace. $oc4/$oc3 distinguish OpenShift 4.x / 3.x,
# which use different RBAC API groups and subject encodings.
{{- if .Values.rbac -}}
{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}}
{{- if $oc3 }}
apiVersion: authorization.openshift.io/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: rbac.authorization.k8s.io/v1
{{- else }}
apiVersion: v1
{{- end }}
kind: RoleBinding
metadata:
  name: neuvector-admin
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
roleRef:
{{- if not $oc3 }}
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
{{- end }}
  name: admin
subjects:
- kind: ServiceAccount
  name: {{ .Values.serviceAccount }}
  namespace: {{ .Release.Namespace }}
{{- if $oc3 }}
# OpenShift 3.x additionally requires explicit userNames on bindings.
userNames:
- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
{{- end }}
---
{{- if $oc4 }}
# OpenShift 4.x: allow the service account to use the privileged SCC.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: system:openshift:scc:privileged
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:openshift:scc:privileged
subjects:
- kind: ServiceAccount
  name: {{ .Values.serviceAccount }}
  namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,97 @@
# Deployment for the CVE scanner pods; replica count and rollout strategy
# come from .Values.cve.scanner. The updater CronJob restarts this
# deployment to pick up refreshed scanner images.
{{- if .Values.cve.scanner.enabled -}}
{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: apps/v1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Deployment
metadata:
  name: neuvector-scanner-pod
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  strategy:
{{ toYaml .Values.cve.scanner.strategy | indent 4 }}
  replicas: {{ .Values.cve.scanner.replicas }}
  selector:
    matchLabels:
      app: neuvector-scanner-pod
  template:
    metadata:
      labels:
        app: neuvector-scanner-pod
      {{- with .Values.cve.scanner.podLabels }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.cve.scanner.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      {{- if .Values.cve.scanner.affinity }}
      affinity:
{{ toYaml .Values.cve.scanner.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.cve.scanner.tolerations }}
      tolerations:
{{ toYaml .Values.cve.scanner.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.cve.scanner.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.cve.scanner.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
      - name: {{ .Values.imagePullSecrets }}
      {{- end }}
      {{- if .Values.cve.scanner.priorityClassName }}
      priorityClassName: {{ .Values.cve.scanner.priorityClassName }}
      {{- end }}
      serviceAccountName: {{ .Values.serviceAccount }}
      serviceAccount: {{ .Values.serviceAccount }}
      {{- if .Values.cve.scanner.runAsUser }}
      securityContext:
        runAsUser: {{ .Values.cve.scanner.runAsUser }}
      {{- end }}
      containers:
        - name: neuvector-scanner-pod
          image: {{ template "system_default_registry" . }}{{ .Values.cve.scanner.image.repository }}:{{ .Values.cve.scanner.image.tag }}
          # Always pull so restarted pods get the latest CVE database image.
          imagePullPolicy: Always
          env:
            - name: CLUSTER_JOIN_ADDR
              value: neuvector-svc-controller.{{ .Release.Namespace }}
            {{- if .Values.cve.scanner.dockerPath }}
            - name: SCANNER_DOCKER_URL
              value: {{ .Values.cve.scanner.dockerPath }}
            {{- end }}
            {{- with .Values.cve.scanner.env }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          resources:
{{ toYaml .Values.cve.scanner.resources | indent 12 }}
          {{- if .Values.internal.certmanager.enabled }}
          volumeMounts:
            - mountPath: /etc/neuvector/certs/internal/cert.key
              subPath: {{ .Values.cve.scanner.internal.certificate.keyFile }}
              name: internal-cert
              readOnly: true
            - mountPath: /etc/neuvector/certs/internal/cert.pem
              subPath: {{ .Values.cve.scanner.internal.certificate.pemFile }}
              name: internal-cert
              readOnly: true
            - mountPath: /etc/neuvector/certs/internal/ca.cert
              subPath: {{ .Values.cve.scanner.internal.certificate.caFile }}
              name: internal-cert
              readOnly: true
          {{- end }}
      restartPolicy: Always
      {{- if .Values.internal.certmanager.enabled }}
      volumes:
        - name: internal-cert
          secret:
            secretName: {{ .Values.cve.scanner.internal.certificate.secret }}
      {{- end }}
{{- end }}

View File

@ -0,0 +1,13 @@
# ServiceAccount used by all NeuVector workloads. Skipped on OpenShift and
# when the configured account is the namespace "default" (already exists).
{{- if not .Values.openshift}}
{{- if ne .Values.serviceAccount "default"}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Values.serviceAccount }}
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,74 @@
# CronJob that periodically triggers a rolling restart of the scanner
# deployment so its pods pull a freshly built CVE-database image.
#
# Fix: the original template emitted TWO "command:" keys in the container
# spec when cve.scanner.enabled was true (an unconditional "sleep 30"
# command followed by a conditional curl command). Duplicate mapping keys
# are invalid YAML; most parsers silently keep the last one, and linters /
# strict API servers reject the manifest. The "sleep 30" no-op command is
# now the else-branch, so exactly one "command:" is rendered and the
# effective (last-wins) behavior is unchanged.
{{- if .Values.cve.updater.enabled -}}
{{- if (semverCompare ">=1.21-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: batch/v1
{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
apiVersion: batch/v1beta1
{{- else }}
apiVersion: batch/v2alpha1
{{- end }}
kind: CronJob
metadata:
  name: neuvector-updater-pod
  namespace: {{ .Release.Namespace }}
  labels:
    chart: {{ template "neuvector.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  schedule: {{ .Values.cve.updater.schedule | quote }}
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: neuvector-updater-pod
            release: {{ .Release.Name }}
          {{- with .Values.cve.updater.podLabels }}
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.cve.updater.podAnnotations }}
          annotations:
            {{- toYaml . | nindent 12 }}
          {{- end }}
        spec:
          {{- if .Values.imagePullSecrets }}
          imagePullSecrets:
          - name: {{ .Values.imagePullSecrets }}
          {{- end }}
          {{- if .Values.cve.updater.nodeSelector }}
          nodeSelector:
{{ toYaml .Values.cve.updater.nodeSelector | indent 12 }}
          {{- end }}
          {{- if .Values.cve.updater.priorityClassName }}
          priorityClassName: {{ .Values.cve.updater.priorityClassName }}
          {{- end }}
          serviceAccountName: {{ .Values.serviceAccount }}
          serviceAccount: {{ .Values.serviceAccount }}
          {{- if .Values.cve.updater.runAsUser }}
          securityContext:
            runAsUser: {{ .Values.cve.updater.runAsUser }}
          {{- end }}
          containers:
          - name: neuvector-updater-pod
            image: {{ template "system_default_registry" . }}{{ .Values.cve.updater.image.repository }}:{{ .Values.cve.updater.image.tag }}
            imagePullPolicy: Always
            {{- if .Values.cve.scanner.enabled }}
            # Patch the scanner deployment's pod-template annotation via the
            # Kubernetes API to force a rolling restart (same effect as
            # "kubectl rollout restart"). "secure" toggles TLS verification
            # of the API server (-v vs -kv).
            command:
            - /bin/sh
            - -c
            {{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
            {{- if .Values.cve.updater.secure }}
            - /usr/bin/curl -v -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/apps/v1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod'
            {{- else }}
            - /usr/bin/curl -kv -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/apps/v1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod'
            {{- end }}
            {{- else }}
            - /usr/bin/curl -kv -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/extensions/v1beta1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod'
            {{- end }}
            {{- else }}
            # No scanner deployed: run a harmless no-op so the job still succeeds.
            command:
            - /bin/sh
            - -c
            - sleep 30
            {{- end }}
          restartPolicy: Never
{{- end }}

View File

@ -0,0 +1,7 @@
# Install-time guard: when PSPs are requested but the cluster no longer
# serves the policy/v1beta1 PodSecurityPolicy API (Kubernetes >= 1.25),
# fail the render with a clear message instead of a cryptic apply error.
# The outer lookup only succeeds with cluster access (skipped by
# "helm template"), avoiding false failures in offline rendering.
{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
{{- if .Values.global.cattle.psp.enabled }}
{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,411 @@
# Default values for neuvector.
# This is a YAML-formatted file.
# Declare variables to be passed into the templates.
global:
cattle:
systemDefaultRegistry: ""
psp:
enabled: false # PSP enablement should default to false
openshift: false
registry: docker.io
oem:
rbac: true
serviceAccount: neuvector
internal: # enable when cert-manager is installed for the internal certificates
certmanager:
enabled: false
secretname: neuvector-internal
controller:
# If false, controller will not be installed
enabled: true
annotations: {}
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
image:
repository: rancher/mirrored-neuvector-controller
tag: 5.1.2
hash:
replicas: 3
disruptionbudget: 0
schedulerName:
priorityClassName:
podLabels: {}
podAnnotations: {}
env: []
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- neuvector-controller-pod
topologyKey: "kubernetes.io/hostname"
tolerations: []
nodeSelector: {}
# key1: value1
# key2: value2
apisvc:
type:
annotations: {}
# OpenShift Route configuration
# Controller supports HTTPS only, so edge termination not supported
route:
enabled: false
termination: passthrough
host:
tls:
#certificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#caCertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#destinationCACertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#key: |
# -----BEGIN PRIVATE KEY-----
# -----END PRIVATE KEY-----
ranchersso:
enabled: true
pvc:
enabled: false
existingClaim: false
accessModes:
- ReadWriteMany
storageClass:
capacity:
azureFileShare:
enabled: false
secretName:
shareName:
certificate:
secret:
keyFile: tls.key
pemFile: tls.pem
internal: # this is used for internal communication. Please use the SAME CA for all the components ( controller, scanner, and enforcer ) The cert needs to have a CN of "NeuVector"
certificate:
secret: neuvector-internal
keyFile: tls.key
pemFile: tls.crt
caFile: ca.crt # must be the same CA for all internal.
federation:
mastersvc:
type:
# Federation Master Ingress
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
ingressClassName: ""
path: "/" # or this could be "/api", but might need "rewrite-target" annotation
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# ingress.kubernetes.io/rewrite-target: /
tls: false
secretName:
annotations: {}
# OpenShift Route configuration
# Controller supports HTTPS only, so edge termination not supported
route:
enabled: false
termination: passthrough
host:
tls:
#certificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#caCertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#destinationCACertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#key: |
# -----BEGIN PRIVATE KEY-----
# -----END PRIVATE KEY-----
managedsvc:
type:
# Federation Managed Ingress
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
ingressClassName: ""
path: "/" # or this could be "/api", but might need "rewrite-target" annotation
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# ingress.kubernetes.io/rewrite-target: /
tls: false
secretName:
annotations: {}
# OpenShift Route configuration
# Controller supports HTTPS only, so edge termination not supported
route:
enabled: false
termination: passthrough
host:
tls:
#certificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#caCertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#destinationCACertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#key: |
# -----BEGIN PRIVATE KEY-----
# -----END PRIVATE KEY-----
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
ingressClassName: ""
path: "/" # or this could be "/api", but might need "rewrite-target" annotation
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# ingress.kubernetes.io/rewrite-target: /
tls: false
secretName:
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
configmap:
enabled: false
data:
# eulainitcfg.yaml: |
# ...
# ldapinitcfg.yaml: |
# ...
# oidcinitcfg.yaml: |
# ...
# samlinitcfg.yaml: |
# ...
# sysinitcfg.yaml: |
# ...
# userinitcfg.yaml: |
# ...
secret:
# NOTE: files defined here have preferrence over the ones defined in the configmap section
enabled: false
data: {}
# eulainitcfg.yaml:
# license_key: 0Bca63Iy2FiXGqjk...
# ...
# ldapinitcfg.yaml:
# directory: OpenLDAP
# ...
# oidcinitcfg.yaml:
# Issuer: https://...
# ...
# samlinitcfg.yaml:
# ...
# sysinitcfg.yaml:
# ...
# userinitcfg.yaml:
# ...
enforcer:
# If false, enforcer will not be installed
enabled: true
image:
repository: rancher/mirrored-neuvector-enforcer
tag: 5.1.2
hash:
updateStrategy:
type: RollingUpdate
priorityClassName:
podLabels: {}
podAnnotations: {}
env: []
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
internal: # this is used for internal communication. Please use the SAME CA for all the components ( controller, scanner, and enforcer ) The cert needs to have a CN of "NeuVector"
certificate:
secret: neuvector-internal
keyFile: tls.key
pemFile: tls.crt
caFile: ca.crt # must be the same CA for all internal.
manager:
# If false, manager will not be installed
enabled: true
image:
repository: rancher/mirrored-neuvector-manager
tag: 5.1.2
hash:
priorityClassName:
env:
ssl: true
svc:
type: NodePort # should be set to - ClusterIP
loadBalancerIP:
annotations: {}
# azure
# service.beta.kubernetes.io/azure-load-balancer-internal: "true"
# service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "apps-subnet"
# OpenShift Route configuration
# Make sure manager env ssl is false for edge termination
route:
enabled: true
termination: passthrough
host:
tls:
#certificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#caCertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#destinationCACertificate: |
# -----BEGIN CERTIFICATE-----
# -----END CERTIFICATE-----
#key: |
# -----BEGIN PRIVATE KEY-----
# -----END PRIVATE KEY-----
certificate:
secret:
keyFile: tls.key
pemFile: tls.pem
ingress:
enabled: false
host: # MUST be set, if ingress is enabled
ingressClassName: ""
path: "/"
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# kubernetes.io/ingress.class: my-nginx
# nginx.ingress.kubernetes.io/whitelist-source-range: "1.1.1.1"
# nginx.ingress.kubernetes.io/rewrite-target: /
# nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
# only for end-to-end tls conf - ingress-nginx accepts backend self-signed cert
tls: false
secretName: # my-tls-secret
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
affinity: {}
podLabels: {}
podAnnotations: {}
tolerations: []
nodeSelector: {}
# key1: value1
# key2: value2
runAsUser: # MUST be set for Rancher hardened cluster
cve:
updater:
# If false, cve updater will not be installed
enabled: true
secure: false
image:
repository: rancher/mirrored-neuvector-updater
tag: latest
hash:
schedule: "0 0 * * *"
priorityClassName:
podLabels: {}
podAnnotations: {}
nodeSelector: {}
# key1: value1
# key2: value2
runAsUser: # MUST be set for Rancher hardened cluster
scanner:
enabled: true
replicas: 3
dockerPath: ""
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
image:
repository: rancher/mirrored-neuvector-scanner
tag: latest
hash:
priorityClassName:
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
affinity: {}
podLabels: {}
podAnnotations: {}
env: []
tolerations: []
nodeSelector: {}
# key1: value1
# key2: value2
runAsUser: # MUST be set for Rancher hardened cluster
internal: # this is used for internal communication. Please use the SAME CA for all the components ( controller, scanner, and enforcer ) The cert needs to have a CN of "NeuVector"
certificate:
secret: neuvector-internal
keyFile: tls.key
pemFile: tls.crt
caFile: ca.crt # must be the same CA for all internal.
docker:
path: /var/run/docker.sock
resources: {}
# limits:
# cpu: 400m
# memory: 2792Mi
# requests:
# cpu: 100m
# memory: 2280Mi
k3s:
enabled: false
runtimePath: /run/k3s/containerd/containerd.sock
bottlerocket:
enabled: false
runtimePath: /run/dockershim.sock
containerd:
enabled: false
path: /var/run/containerd/containerd.sock
crio:
enabled: false
path: /var/run/crio/crio.sock
admissionwebhook:
type: ClusterIP
crdwebhook:
enabled: true
type: ClusterIP

View File

@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/release-name: rancher-backup-crd
apiVersion: v2
appVersion: 3.1.1-rc2
description: Installs the CRDs for rancher-backup.
name: rancher-backup-crd
type: application
version: 102.0.1+up3.1.1-rc2

View File

@ -0,0 +1,3 @@
# Rancher Backup CRD
A Rancher chart that installs the CRDs used by `rancher-backup`.

View File

@ -0,0 +1,141 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: backups.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: Backup
plural: backups
singular: backup
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.storageLocation
name: Location
type: string
- jsonPath: .status.backupType
name: Type
type: string
- jsonPath: .status.filename
name: Latest-Backup
type: string
- jsonPath: .spec.resourceSetName
name: ResourceSet
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
encryptionConfigSecretName:
description: Name of the Secret containing the encryption config
nullable: true
type: string
resourceSetName:
description: Name of the ResourceSet CR to use for backup
nullable: true
type: string
retentionCount:
minimum: 1
type: integer
schedule:
description: Cron schedule for recurring backups
example:
Descriptors: '@midnight'
Standard crontab specs: 0 0 * * *
nullable: true
type: string
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
nullable: true
type: string
credentialSecretName:
nullable: true
type: string
credentialSecretNamespace:
nullable: true
type: string
endpoint:
nullable: true
type: string
endpointCA:
nullable: true
type: string
folder:
nullable: true
type: string
insecureTLSSkipVerify:
type: boolean
region:
nullable: true
type: string
type: object
type: object
required:
- resourceSetName
type: object
status:
properties:
backupType:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
filename:
nullable: true
type: string
lastSnapshotTs:
nullable: true
type: string
nextSnapshotAt:
nullable: true
type: string
observedGeneration:
type: integer
storageLocation:
nullable: true
type: string
summary:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@ -0,0 +1,118 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: resourcesets.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: ResourceSet
plural: resourcesets
singular: resourceset
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
controllerReferences:
items:
properties:
apiVersion:
nullable: true
type: string
name:
nullable: true
type: string
namespace:
nullable: true
type: string
replicas:
type: integer
resource:
nullable: true
type: string
type: object
nullable: true
type: array
resourceSelectors:
items:
properties:
apiVersion:
nullable: true
type: string
excludeKinds:
items:
nullable: true
type: string
nullable: true
type: array
excludeResourceNameRegexp:
nullable: true
type: string
kinds:
items:
nullable: true
type: string
nullable: true
type: array
kindsRegexp:
nullable: true
type: string
labelSelectors:
nullable: true
properties:
matchExpressions:
items:
properties:
key:
nullable: true
type: string
operator:
nullable: true
type: string
values:
items:
nullable: true
type: string
nullable: true
type: array
type: object
nullable: true
type: array
matchLabels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
type: object
namespaceRegexp:
nullable: true
type: string
namespaces:
items:
nullable: true
type: string
nullable: true
type: array
resourceNameRegexp:
nullable: true
type: string
resourceNames:
items:
nullable: true
type: string
nullable: true
type: array
type: object
nullable: true
required:
- apiVersion
type: array
required:
- resourceSelectors
type: object
served: true
storage: true
subresources:
status: {}

View File

@ -0,0 +1,122 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: restores.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: Restore
plural: restores
singular: restore
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.backupSource
name: Backup-Source
type: string
- jsonPath: .spec.backupFilename
name: Backup-File
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
backupFilename:
nullable: true
type: string
deleteTimeoutSeconds:
maximum: 10
type: integer
encryptionConfigSecretName:
nullable: true
type: string
ignoreErrors:
type: boolean
prune:
nullable: true
type: boolean
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
nullable: true
type: string
credentialSecretName:
nullable: true
type: string
credentialSecretNamespace:
nullable: true
type: string
endpoint:
nullable: true
type: string
endpointCA:
nullable: true
type: string
folder:
nullable: true
type: string
insecureTLSSkipVerify:
type: boolean
region:
nullable: true
type: string
type: object
type: object
required:
- backupFilename
type: object
status:
properties:
backupSource:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
observedGeneration:
type: integer
restoreCompletionTs:
nullable: true
type: string
summary:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@ -0,0 +1,26 @@
annotations:
catalog.cattle.io/auto-install: rancher-backup-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Rancher Backups
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.26.0-0'
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: resources.cattle.io.resourceset/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: rancher-backup
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: rancher-backup
catalog.cattle.io/upstream-version: 2.1.1
apiVersion: v2
appVersion: 3.1.1-rc2
description: Provides ability to back up and restore the Rancher application running
on any Kubernetes cluster
icon: https://charts.rancher.io/assets/logos/backup-restore.svg
keywords:
- applications
- infrastructure
kubeVersion: '>= 1.16.0-0'
name: rancher-backup
version: 102.0.1+up3.1.1-rc2

View File

@ -0,0 +1,79 @@
# Rancher Backup
This chart provides ability to back up and restore the Rancher application running on any Kubernetes cluster.
Refer to [this](https://github.com/rancher/backup-restore-operator) repository for implementation details.
-----
### Get Repo Info
```bash
helm repo add rancher-chart https://charts.rancher.io
helm repo update
```
-----
### Install Chart
```bash
helm install rancher-backup-crd rancher-chart/rancher-backup-crd -n cattle-resources-system --create-namespace
helm install rancher-backup rancher-chart/rancher-backup -n cattle-resources-system
```
-----
### Configuration
The following table lists the configurable parameters of the rancher-backup chart and their default values:
| Parameter | Description | Default |
|----------|---------------|-------|
| image.repository | Container image repository | rancher/backup-restore-operator |
| image.tag | Container image tag | v0.1.0-rc1 |
| s3.enabled | Configure S3 compatible default storage location. Current version supports S3 and MinIO | false |
| s3.credentialSecretName | Name of the Secret containing S3 credentials. This is an optional field. Skip this field in order to use IAM Role authentication. The Secret must contain following two keys, `accessKey` and `secretKey` | "" |
| s3.credentialSecretNamespace | Namespace of the Secret containing S3 credentials. This can be any namespace. | "" |
| s3.region | Region of the S3 Bucket (Required for S3, not valid for MinIO) | "" |
| s3.bucketName | Name of the Bucket | "" |
| s3.folder | Base folder within the Bucket (optional) | "" |
| s3.endpoint | Endpoint for the S3 storage provider | "" |
| s3.endpointCA | Base64 encoded CA cert for the S3 storage provider (optional) | "" |
| s3.insecureTLSSkipVerify | Skip SSL verification | false |
| persistence.enabled | Configure a Persistent Volume as the default storage location. It accepts either a StorageClass name to create a PVC, or directly accepts the PV to use. The Persistent Volume is mounted at `/var/lib/backups` in the operator pod | false |
| persistence.storageClass | StorageClass to use for dynamically provisioning the Persistent Volume, which will be used for storing backups | "" |
| persistence.volumeName | Persistent Volume to use for storing backups | "" |
| persistence.size | Requested size of the Persistent Volume (Applicable when using dynamic provisioning) | "" |
| debug | Set debug flag for backup-restore deployment | false |
| trace | Set trace flag for backup-restore deployment | false |
| nodeSelector | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | {} |
| tolerations | https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | [] |
| affinity | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity | {} |
| serviceAccount.annotations | Annotations to apply to created service account | {} |
| global.cattle.psp.enabled | Enable or disable PSPs in the chart | false |
-----
### PSPs
We have added a configuration to the chart `values.yaml` which allows you to enable or disable PSPs to align with the PSP deprecation in Kubernetes `v1.25` and above.
-----
### CRDs
Refer to [this](https://github.com/rancher/backup-restore-operator#crds) section for information on the CRDs that this chart installs. Also refer to [this](https://github.com/rancher/backup-restore-operator/tree/master/examples) folder, which contains sample manifests for the CRDs.
-----
### Upgrading Chart
```bash
helm upgrade rancher-backup-crd rancher-chart/rancher-backup-crd -n cattle-resources-system
helm upgrade rancher-backup rancher-chart/rancher-backup -n cattle-resources-system
```
-----
### Uninstall Chart
```bash
helm uninstall rancher-backup -n cattle-resources-system
helm uninstall rancher-backup-crd -n cattle-resources-system
```

View File

@ -0,0 +1,33 @@
# Rancher Backup
This chart enables ability to capture backups of the Rancher application and restore from these backups. This chart can be used to migrate Rancher from one Kubernetes cluster to a different Kubernetes cluster.
For more information on how to use the feature, refer to our [docs](https://ranchermanager.docs.rancher.com/pages-for-subheaders/backup-restore-and-disaster-recovery).
This chart installs the following components:
- [backup-restore-operator](https://github.com/rancher/backup-restore-operator)
- The operator handles backing up all Kubernetes resources and CRDs that Rancher creates and manages from the local cluster. It gathers these resources by querying the Kubernetes API server, packages all the resources to create a tarball file and saves it in the configured backup storage location.
- The operator can be configured to store backups in S3-compatible object stores such as AWS S3 and MinIO, and in persistent volumes. During deployment, you can create a default storage location, and each individual backup can still override that default; such per-backup overrides are limited to S3-compatible object stores.
- It preserves the ownerReferences on all resources, hence maintaining dependencies between objects.
- This operator provides encryption support, to encrypt user specified resources before saving them in the backup file. It uses the same encryption configuration that is used to enable [Kubernetes Encryption at Rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
- Backup - A backup is a CRD (`Backup`) that defines when to take backups, where to store the backup and what encryption to use (optional). Backups can be taken ad hoc or scheduled to be taken in intervals.
- Restore - A restore is a CRD (`Restore`) that defines which backup to use to restore the Rancher application to.
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, any previous field that was associated with any PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.

View File

@ -0,0 +1,25 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "aks.cattle.io$"
- apiVersion: "aks.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
namespaces:
- "cattle-system"
resourceNames:
- "aks-config-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "aks-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "aks-operator"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaces:
- "cattle-system"
resourceNames:
- "aks-operator"

View File

@ -0,0 +1,17 @@
- apiVersion: "eks.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
resourceNames:
- "eks-config-operator"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "eks.cattle.io$"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "eks-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "eks-operator"

View File

@ -0,0 +1,49 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "elemental.cattle.io$"
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
namespaces:
- "cattle-elemental-system"
resourceNames:
- "elemental-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "elemental-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "elemental-operator"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaces:
- "cattle-elemental-system"
resourceNames:
- "elemental-operator"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "^globalrole$"
resourceNames:
- "elemental-operator"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "^apiservice$"
resourceNameRegexp: "elemental.cattle.io$"
- apiVersion: "elemental.cattle.io/v1beta1"
kindsRegexp: "."
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^roles$|^rolebindings$"
labelSelectors:
matchExpressions:
- key: "elemental.cattle.io/managed"
operator: "In"
values: ["true"]
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
- apiVersion: "v1"
kindsRegexp: "^secrets$|^serviceaccounts$"
labelSelectors:
matchExpressions:
- key: "elemental.cattle.io/managed"
operator: "In"
values: ["true"]
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"

View File

@ -0,0 +1,51 @@
- apiVersion: "v1"
kindsRegexp: "^namespaces$"
resourceNameRegexp: "^fleet-|^cluster-fleet-"
- apiVersion: "v1"
kindsRegexp: "^secrets$"
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
excludeResourceNameRegexp: "^import-token"
labelSelectors:
matchExpressions:
- key: "owner"
operator: "NotIn"
values: ["helm"]
- key: "fleet.cattle.io/managed"
operator: "In"
values: ["true"]
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
excludeResourceNameRegexp: "^default$"
- apiVersion: "v1"
kindsRegexp: "^configmaps$"
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^roles$|^rolebindings$"
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNameRegexp: "^fleet-|^gitjob-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNameRegexp: "^fleet-"
resourceNames:
- "gitjob"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "fleet.cattle.io$|gitjob.cattle.io$"
- apiVersion: "fleet.cattle.io/v1alpha1"
kindsRegexp: "."
- apiVersion: "gitjob.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
resourceNameRegexp: "^fleet-"
resourceNames:
- "gitjob"
# Services belong to the core API group (v1), not apps/v1
- apiVersion: "v1"
kindsRegexp: "^services$"
namespaceRegexp: "^cattle-fleet-|^fleet-|^cluster-fleet-"
resourceNames:
- "gitjob"

View File

@ -0,0 +1,17 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "gke.cattle.io$"
- apiVersion: "gke.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
resourceNames:
- "gke-config-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "gke-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "gke-operator"

View File

@ -0,0 +1,25 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "provisioning.cattle.io$|rke-machine-config.cattle.io$|rke-machine.cattle.io$|rke.cattle.io$|cluster.x-k8s.io$"
- apiVersion: "provisioning.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "rke-machine-config.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "rke-machine.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "rke.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "cluster.x-k8s.io/v1alpha4"
kindsRegexp: "."
- apiVersion: "cluster.x-k8s.io/v1beta1"
kindsRegexp: "."
- apiVersion: "v1"
kindsRegexp: "^secrets$"
resourceNameRegexp: "machine-plan$|rke-state$|machine-state$|machine-driver-secret$|machine-provision$|^harvesterconfig"
namespaces:
- "fleet-default"
- apiVersion: "v1"
kindsRegexp: "^configmaps$"
resourceNames:
- "provisioning-log"
namespaceRegexp: "^c-m-"

View File

@ -0,0 +1,28 @@
- apiVersion: "rancher.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
resourceNames:
- "rancher-operator"
namespaces:
- "rancher-operator-system"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaces:
- "rancher-operator-system"
excludeResourceNameRegexp: "^default$"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "rancher-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "rancher-operator"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "rancher.cattle.io$"
- apiVersion: "v1"
kindsRegexp: "^namespaces$"
resourceNames:
- "rancher-operator-system"

View File

@ -0,0 +1,65 @@
- apiVersion: "v1"
kindsRegexp: "^namespaces$"
resourceNameRegexp: "^cattle-|^p-|^c-|^user-|^u-"
resourceNames:
- "local"
- apiVersion: "v1"
kindsRegexp: "^secrets$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
labelSelectors:
matchExpressions:
- key: "owner"
operator: "NotIn"
values: ["helm"]
excludeResourceNameRegexp: "^bootstrap-secret$|^rancher-csp-adapter|^csp-adapter-cache$"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
excludeResourceNameRegexp: "^default$|^rancher-csp-adapter$"
- apiVersion: "v1"
kindsRegexp: "^configmaps$"
namespaces:
- "cattle-system"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^roles$|^rolebindings$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
excludeResourceNameRegexp: "^rancher-csp-adapter"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNameRegexp: "^cattle-|^clusterrolebinding-|^globaladmin-user-|^grb-u-|^crb-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNameRegexp: "^cattle-|^p-|^c-|^local-|^user-|^u-|^project-|^create-ns$"
excludeResourceNameRegexp: "^rancher-csp-adapter-"
- apiVersion: "scheduling.k8s.io/v1"
kindsRegexp: "^priorityclasses$"
resourceNameRegexp: "^rancher-critical$"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "management.cattle.io$|project.cattle.io$|catalog.cattle.io$|resources.cattle.io$"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "."
excludeKinds:
- "tokens"
- "rancherusernotifications"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "^tokens$"
labelSelectors:
matchExpressions:
- key: "authn.management.cattle.io/kind"
operator: "NotIn"
values: [ "provisioning" ]
- apiVersion: "project.cattle.io/v3"
kindsRegexp: "."
- apiVersion: "catalog.cattle.io/v1"
kindsRegexp: "^clusterrepos$"
- apiVersion: "resources.cattle.io/v1"
kindsRegexp: "^ResourceSet$"
- apiVersion: "v1"
kindsRegexp: "^secrets$"
namespaceRegexp: "^.*$"
labelSelectors:
matchExpressions:
- key: "resources.cattle.io/backup"
operator: "In"
values: ["true"]

View File

@ -0,0 +1,87 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "backupRestore.fullname" -}}
{{- .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "backupRestore.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "backupRestore.labels" -}}
helm.sh/chart: {{ include "backupRestore.chart" . }}
{{ include "backupRestore.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "backupRestore.selectorLabels" -}}
app.kubernetes.io/name: {{ include "backupRestore.fullname" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
resources.cattle.io/operator: backup-restore
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "backupRestore.serviceAccountName" -}}
{{ include "backupRestore.fullname" . }}
{{- end }}
{{- define "backupRestore.s3SecretName" -}}
{{- printf "%s-%s" .Chart.Name "s3" | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create PVC name using release and revision number, unless a volumeName is given.
*/}}
{{- define "backupRestore.pvcName" -}}
{{- if and .Values.persistence.volumeName }}
{{- printf "%s" .Values.persistence.volumeName }}
{{- else -}}
{{- printf "%s-%d" .Release.Name .Release.Revision }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,14 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "backupRestore.fullname" . }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "backupRestore.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,79 @@
{{- if and .Values.s3.enabled .Values.persistence.enabled }}
{{- fail "\n\nCannot configure both s3 and PV for storing backups" }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "backupRestore.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "backupRestore.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "backupRestore.selectorLabels" . | nindent 8 }}
annotations:
checksum/s3: {{ include (print $.Template.BasePath "/s3-secret.yaml") . | sha256sum }}
checksum/pvc: {{ include (print $.Template.BasePath "/pvc.yaml") . | sha256sum }}
spec:
serviceAccountName: {{ include "backupRestore.serviceAccountName" . }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 6 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ default "Always" .Values.imagePullPolicy }}
args:
{{- if .Values.debug }}
- "--debug"
{{- end }}
{{- if .Values.trace }}
- "--trace"
{{- end }}
env:
- name: CHART_NAMESPACE
value: {{ .Release.Namespace }}
{{- if .Values.s3.enabled }}
- name: DEFAULT_S3_BACKUP_STORAGE_LOCATION
value: {{ include "backupRestore.s3SecretName" . }}
{{- end }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.persistence.enabled }}
- name: DEFAULT_PERSISTENCE_ENABLED
value: "persistence-enabled"
volumeMounts:
- mountPath: "/var/lib/backups"
name: pv-storage
volumes:
- name: pv-storage
persistentVolumeClaim:
claimName: {{ include "backupRestore.pvcName" . }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}

View File

@ -0,0 +1,124 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
namespace: {{ .Release.Namespace }}
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
backoffLimit: 1
template:
spec:
serviceAccountName: {{ include "backupRestore.fullname" . }}-patch-sa
securityContext:
runAsNonRoot: true
runAsUser: 1000
restartPolicy: Never
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
containers:
- name: {{ include "backupRestore.fullname" . }}-patch-sa
image: {{ include "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}
imagePullPolicy: IfNotPresent
command: ["kubectl", "-n", {{ .Release.Namespace | quote }}, "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
namespace: {{ .Release.Namespace }}
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "patch"]
{{- if .Values.global.cattle.psp.enabled}}
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames:
- {{ include "backupRestore.fullname" . }}-patch-sa
{{- end}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "backupRestore.fullname" . }}-patch-sa
subjects:
- kind: ServiceAccount
name: {{ include "backupRestore.fullname" . }}-patch-sa
namespace: {{ .Release.Namespace }}
---
{{- if .Values.global.cattle.psp.enabled}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- end}}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "backupRestore.fullname" . }}-default-allow-all
namespace: {{ .Release.Namespace }}
spec:
podSelector: {}
egress:
- {}
policyTypes:
- Ingress
- Egress

View File

@ -0,0 +1,31 @@
{{- if .Values.global.cattle.psp.enabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "backupRestore.fullname" . }}-psp
labels: {{ include "backupRestore.labels" . | nindent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'persistentVolumeClaim'
- 'secret'
{{- end -}}

View File

@ -0,0 +1,27 @@
{{- if .Values.persistence.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "backupRestore.pvcName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
spec:
accessModes:
- ReadWriteOnce
resources:
{{- with .Values.persistence }}
requests:
storage: {{ .size | quote }}
{{- if .storageClass }}
{{- if (eq "-" .storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .storageClass | quote }}
{{- end }}
{{- end }}
{{- if .volumeName }}
volumeName: {{ .volumeName | quote }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,13 @@
apiVersion: resources.cattle.io/v1
kind: ResourceSet
metadata:
name: rancher-resource-set
controllerReferences:
- apiVersion: "apps/v1"
resource: "deployments"
name: "rancher"
namespace: "cattle-system"
resourceSelectors:
{{- range $path, $_ := .Files.Glob "files/default-resourceset-contents/*.yaml" -}}
{{- $.Files.Get $path | nindent 2 -}}
{{- end -}}

View File

@ -0,0 +1,31 @@
{{- if .Values.s3.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "backupRestore.s3SecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
type: Opaque
stringData:
{{- with .Values.s3 }}
{{- if .credentialSecretName }}
credentialSecretName: {{ .credentialSecretName }}
credentialSecretNamespace: {{ required "When providing a Secret containing S3 credentials, a valid .Values.s3.credentialSecretNamespace must be provided" .credentialSecretNamespace }}
{{- end }}
{{- if .region }}
region: {{ .region | quote }}
{{- end }}
bucketName: {{ required "A valid .Values.s3.bucketName is required for configuring S3 compatible storage as the default backup storage location" .bucketName | quote }}
{{- if .folder }}
folder: {{ .folder | quote }}
{{- end }}
endpoint: {{ required "A valid .Values.s3.endpoint is required for configuring S3 compatible storage as the default backup storage location" .endpoint | quote }}
{{- if .endpointCA }}
endpointCA: {{ .endpointCA }}
{{- end }}
{{- if .insecureTLSSkipVerify }}
insecureTLSSkipVerify: {{ .insecureTLSSkipVerify | quote }}
{{- end }}
{{- end }}
{{ end }}

View File

@ -0,0 +1,11 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "backupRestore.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
annotations:
{{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,16 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "resources.cattle.io/v1/Backup" false -}}
# {{- set $found "resources.cattle.io/v1/ResourceSet" false -}}
# {{- set $found "resources.cattle.io/v1/Restore" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

View File

@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}

View File

@ -0,0 +1,216 @@
suite: Test Deployment
templates:
- deployment.yaml
- s3-secret.yaml
- pvc.yaml
- _helpers.tpl
tests:
- it: should set name
template: deployment.yaml
asserts:
- equal:
path: metadata.name
value: "rancher-backup"
- it: should set namespace
template: deployment.yaml
asserts:
- equal:
path: metadata.namespace
value: "NAMESPACE"
- it: should set priorityClassName
set:
priorityClassName: "testClass"
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.priorityClassName
value: "testClass"
- it: should set default imagePullPolicy
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: "Always"
- it: should set imagePullPolicy
set:
imagePullPolicy: "IfNotPresent"
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: "IfNotPresent"
- it: should set debug loglevel
set:
debug: true
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--debug"
- it: should set trace loglevel
set:
trace: true
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--trace"
- it: should set proxy environment variables
set:
proxy: "https://127.0.0.1:3128"
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: HTTP_PROXY
value: "https://127.0.0.1:3128"
- contains:
path: spec.template.spec.containers[0].env
content:
name: HTTPS_PROXY
value: "https://127.0.0.1:3128"
- contains:
path: spec.template.spec.containers[0].env
content:
name: NO_PROXY
value: "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local"
- it: should set proxy environment variables with modified noproxy
set:
proxy: "https://127.0.0.1:3128"
noProxy: "192.168.0.0/24"
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: NO_PROXY
value: "192.168.0.0/24"
- it: should set persistence variables
set:
persistence.enabled: true
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: DEFAULT_PERSISTENCE_ENABLED
value: "persistence-enabled"
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: "/var/lib/backups"
name: "pv-storage"
- equal:
path: spec.template.spec.volumes[0].name
value: "pv-storage"
- equal:
path: spec.template.spec.volumes[0].persistentVolumeClaim
value:
claimName: RELEASE-NAME-0
- it: should set claim from custom static volumeName
set:
persistence.enabled: true
persistence.volumeName: "PREDEFINED-VOLUME"
persistence.storageClass: "PREDEFINED-STORAGECLASS"
persistence.size: "PREDEFINED-SAMEAS-PVSIZE"
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: DEFAULT_PERSISTENCE_ENABLED
value: "persistence-enabled"
- equal:
path: spec.template.spec.volumes[0].persistentVolumeClaim
value:
claimName: PREDEFINED-VOLUME
- it: should set private registry
template: deployment.yaml
set:
global.cattle.systemDefaultRegistry: "my.registry.local:3000"
asserts:
- matchRegex:
path: spec.template.spec.containers[0].image
pattern: ^my.registry.local:3000/rancher/backup-restore-operator:.*$
- it: should set nodeselector
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.nodeSelector
value:
kubernetes.io/os: linux
- it: should not set default affinity
template: deployment.yaml
asserts:
- isNull:
path: spec.template.spec.affinity
- it: should set custom affinity
template: deployment.yaml
set:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: disktype
operator: In
values:
- ssd
asserts:
- equal:
path: spec.template.spec.affinity
value:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: disktype
operator: In
values:
- ssd
- it: should set tolerations
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.tolerations[0]
value:
key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
- it: should set custom tolerations
template: deployment.yaml
set:
tolerations:
- key: "example-key"
operator: "Exists"
effect: "NoSchedule"
asserts:
- equal:
path: spec.template.spec.tolerations[0]
value:
key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
- equal:
path: spec.template.spec.tolerations[1]
value:
key: "example-key"
operator: "Exists"
effect: "NoSchedule"
- it: should not set default imagePullSecrets
template: deployment.yaml
asserts:
- isNull:
path: spec.template.spec.imagePullSecrets
- it: should set imagePullSecrets
set:
imagePullSecrets:
- name: "pull-secret"
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: "pull-secret"

View File

@ -0,0 +1,102 @@
suite: Test PVC
templates:
- pvc.yaml
- _helpers.tpl
tests:
- it: should set name
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: metadata.name
value: "RELEASE-NAME-0"
- it: should set namespace
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: metadata.namespace
value: "NAMESPACE"
- it: should set accessModes
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: spec.accessModes[0]
value: "ReadWriteOnce"
- it: should set size
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: spec.resources.requests.storage
value: "2Gi"
- it: should set size
template: pvc.yaml
set:
persistence:
enabled: true
size: "10Gi"
asserts:
- equal:
path: spec.resources.requests.storage
value: "10Gi"
- it: should not set volumeName
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- isNull:
path: spec.volumeName
- it: should set default storageClass
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: spec.storageClassName
value: ""
- it: should set custom storageClass
template: pvc.yaml
set:
persistence:
enabled: true
storageClass: "storage-class"
asserts:
- equal:
path: spec.storageClassName
value: "storage-class"
- it: should set custom volumeName
template: pvc.yaml
set:
persistence:
enabled: true
volumeName: "volume-name"
asserts:
- equal:
path: spec.volumeName
value: "volume-name"
- it: should set claim from custom static volumeName
set:
persistence.enabled: true
persistence.volumeName: "PREDEFINED-VOLUME"
persistence.storageClass: "PREDEFINED-STORAGECLASS"
persistence.size: "PREDEFINED-SAMEAS-PVSIZE"
template: pvc.yaml
asserts:
- equal:
path: spec.resources.requests.storage
value: "PREDEFINED-SAMEAS-PVSIZE"
- equal:
path: spec.storageClassName
value: "PREDEFINED-STORAGECLASS"

View File

@ -0,0 +1,141 @@
suite: Test S3 Secret
templates:
- s3-secret.yaml
- _helpers.tpl
tests:
- it: should set name
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- equal:
path: metadata.name
value: "rancher-backup-s3"
- it: should set namespace
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- equal:
path: metadata.namespace
value: "NAMESPACE"
- it: should not set credentialSecretName
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.credentialSecretName
- it: should set credentialSecretName
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
credentialSecretName: "credential-secret-name"
credentialSecretNamespace: "credential-secret-namespace"
asserts:
- equal:
path: stringData.credentialSecretName
value: "credential-secret-name"
- equal:
path: stringData.credentialSecretNamespace
value: "credential-secret-namespace"
- it: should not set folder
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.folder
- it: should set folder
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
folder: "myfolder"
asserts:
- equal:
path: stringData.folder
value: "myfolder"
- it: should not set region
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.region
- it: should set region
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
region: "us-west-1"
asserts:
- equal:
path: stringData.region
value: "us-west-1"
- it: should not set endpointCA
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.endpointCA
- it: should set endpointCA
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
endpointCA: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURHakNDQWdLZ0F3SUJBZ0lKQUtpWFZpNEpBb0J5TUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NakF3T0RNd01UZ3lOVFE1V2hjTk1qQXhNREk1TVRneU5UUTVXakFTTVJBdwpEZ1lEVlFRRERBZDBaWE4wTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA4dnV3Q2Y0SEhtR2Q2azVNTmozRW5NOG00T2RpS3czSGszd1NlOUlXQkwyVzY5WDZxenBhN2I2M3U2L05mMnkKSnZWNDVqeXplRFB6bFJycjlpbEpWaVZ1NFNqWlFjdG9jWmFCaVNsL0xDbEFDdkFaUlYvKzN0TFVTZSs1ZDY0QQpWcUhDQlZObU5xM3E3aVY0TE1aSVpRc3N6K0FxaU1Sd0pOMVVKQTZ6V0tUc2Yzc3ByQ0J2dWxJWmZsVXVETVAyCnRCTCt6cXZEc0pDdWlhNEEvU2JNT29tVmM2WnNtTGkwMjdub3dGRld3MnRpSkM5d0xMRE14NnJoVHQ4a3VvVHYKQXJpUjB4WktiRU45L1Uzb011eUVKbHZyck9YS2ZuUDUwbk8ycGNaQnZCb3pUTStYZnRvQ1d5UnhKUmI5cFNTRApKQjlmUEFtLzNZcFpMMGRKY2sxR1h3SURBUUFCbzNNd2NUQWRCZ05WSFE0RUZnUVU5NHU4WXlMdmE2MTJnT1pyCm44QnlFQ2NucVFjd1FnWURWUjBqQkRzd09ZQVU5NHU4WXlMdmE2MTJnT1pybjhCeUVDY25xUWVoRnFRVU1CSXgKRURBT0JnTlZCQU1NQjNSbGMzUXRZMkdDQ1FDb2wxWXVDUUtBY2pBTUJnTlZIUk1FQlRBREFRSC9NQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFER1JRZ1RtdzdVNXRQRHA5Q2psOXlLRW9Vd2pYWWM2UlAwdm1GSHpubXJ3dUVLCjFrTkVJNzhBTUw1MEpuS29CY0ljVDNEeGQ3TGdIbTNCRE5mVVh2anArNnZqaXhJYXR2UWhsSFNVaWIyZjJsSTkKVEMxNzVyNCtROFkzelc1RlFXSDdLK08vY3pJTGh5ei93aHRDUlFkQ29lS1dXZkFiby8wd0VSejZzNkhkVFJzNwpHcWlGNWZtWGp6S0lOcTBjMHRyZ0xtalNKd1hwSnU0ZnNGOEcyZUh4b2pOKzdJQ1FuSkg5cGRIRVpUQUtOL2ppCnIvem04RlZtd1kvdTBndEZneWVQY1ZWbXBqRm03Y0ZOSkc4Y2ZYd0QzcEFwVjhVOGNocTZGeFBHTkVvWFZnclMKY1VRMklaU0RJd1FFY3FvSzFKSGdCUWw2RXBaUVpWMW1DRklrdFBwSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t"
asserts:
- equal:
path: stringData.endpointCA
value: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURHakNDQWdLZ0F3SUJBZ0lKQUtpWFZpNEpBb0J5TUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NakF3T0RNd01UZ3lOVFE1V2hjTk1qQXhNREk1TVRneU5UUTVXakFTTVJBdwpEZ1lEVlFRRERBZDBaWE4wTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA4dnV3Q2Y0SEhtR2Q2azVNTmozRW5NOG00T2RpS3czSGszd1NlOUlXQkwyVzY5WDZxenBhN2I2M3U2L05mMnkKSnZWNDVqeXplRFB6bFJycjlpbEpWaVZ1NFNqWlFjdG9jWmFCaVNsL0xDbEFDdkFaUlYvKzN0TFVTZSs1ZDY0QQpWcUhDQlZObU5xM3E3aVY0TE1aSVpRc3N6K0FxaU1Sd0pOMVVKQTZ6V0tUc2Yzc3ByQ0J2dWxJWmZsVXVETVAyCnRCTCt6cXZEc0pDdWlhNEEvU2JNT29tVmM2WnNtTGkwMjdub3dGRld3MnRpSkM5d0xMRE14NnJoVHQ4a3VvVHYKQXJpUjB4WktiRU45L1Uzb011eUVKbHZyck9YS2ZuUDUwbk8ycGNaQnZCb3pUTStYZnRvQ1d5UnhKUmI5cFNTRApKQjlmUEFtLzNZcFpMMGRKY2sxR1h3SURBUUFCbzNNd2NUQWRCZ05WSFE0RUZnUVU5NHU4WXlMdmE2MTJnT1pyCm44QnlFQ2NucVFjd1FnWURWUjBqQkRzd09ZQVU5NHU4WXlMdmE2MTJnT1pybjhCeUVDY25xUWVoRnFRVU1CSXgKRURBT0JnTlZCQU1NQjNSbGMzUXRZMkdDQ1FDb2wxWXVDUUtBY2pBTUJnTlZIUk1FQlRBREFRSC9NQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFER1JRZ1RtdzdVNXRQRHA5Q2psOXlLRW9Vd2pYWWM2UlAwdm1GSHpubXJ3dUVLCjFrTkVJNzhBTUw1MEpuS29CY0ljVDNEeGQ3TGdIbTNCRE5mVVh2anArNnZqaXhJYXR2UWhsSFNVaWIyZjJsSTkKVEMxNzVyNCtROFkzelc1RlFXSDdLK08vY3pJTGh5ei93aHRDUlFkQ29lS1dXZkFiby8wd0VSejZzNkhkVFJzNwpHcWlGNWZtWGp6S0lOcTBjMHRyZ0xtalNKd1hwSnU0ZnNGOEcyZUh4b2pOKzdJQ1FuSkg5cGRIRVpUQUtOL2ppCnIvem04RlZtd1kvdTBndEZneWVQY1ZWbXBqRm03Y0ZOSkc4Y2ZYd0QzcEFwVjhVOGNocTZGeFBHTkVvWFZnclMKY1VRMklaU0RJd1FFY3FvSzFKSGdCUWw2RXBaUVpWMW1DRklrdFBwSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t"
- it: should not set insecureTLSSkipVerify
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.insecureTLSSkipVerify
- it: should set insecureTLSSkipVerify
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
insecureTLSSkipVerify: "true"
asserts:
- equal:
path: stringData.insecureTLSSkipVerify
value: "true"

View File

@ -0,0 +1,81 @@
image:
repository: rancher/backup-restore-operator
tag: v3.1.1-rc2
## Default s3 bucket for storing all backup files created by the backup-restore-operator
s3:
enabled: false
## credentialSecretName if set, should be the name of the Secret containing AWS credentials.
## To use IAM Role, don't set this field
credentialSecretName: ""
credentialSecretNamespace: ""
region: ""
bucketName: ""
folder: ""
endpoint: ""
endpointCA: ""
insecureTLSSkipVerify: false
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
## If persistence is enabled, operator will create a PVC with mountPath /var/lib/backups
persistence:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack).
## Refer https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
##
storageClass: "-"
## If you want to disable dynamic provisioning by setting storageClass to "-" above,
## and want to target a particular PV, provide name of the target volume
volumeName: ""
## Only certain StorageClasses allow resizing PVs; Refer https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/
size: 2Gi
# Add log level flags to backup-restore
debug: false
trace: false
# http[s] proxy server passed to backup client
# proxy: http://<username>:<password>@<url>:<port>
# comma separated list of domains or ip addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
global:
cattle:
systemDefaultRegistry: ""
psp:
enabled: false # PSP enablement should default to false
kubectl:
repository: rancher/kubectl
tag: v1.21.9
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
affinity: {}
serviceAccount:
annotations: {}
priorityClassName: ""
# Override imagePullPolicy for image
# options: Always, Never, IfNotPresent
# Defaults to Always
imagePullPolicy: "Always"
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []

View File

@ -3254,6 +3254,37 @@ entries:
- assets/longhorn-crd/longhorn-crd-1.0.200.tgz
version: 1.0.200
neuvector:
- annotations:
catalog.cattle.io/auto-install: neuvector-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: NeuVector
catalog.cattle.io/kube-version: '>=1.18.0-0 < 1.27.0-0'
catalog.cattle.io/namespace: cattle-neuvector-system
catalog.cattle.io/os: linux
catalog.cattle.io/permit-os: linux
catalog.cattle.io/provides-gvr: neuvector.com/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: neuvector
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/upstream-version: 2.4.3
apiVersion: v1
appVersion: 5.1.2
created: "2023-04-14T13:51:34.8236025-07:00"
description: Helm feature chart for NeuVector's core services
digest: 610ea7fd0bd63e20b795776e888caa6e9abc4fc88749cfefe1715d2b09f6c09d
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
keywords:
- security
maintainers:
- email: support@neuvector.com
name: becitsthere
name: neuvector
sources:
- https://github.com/neuvector/neuvector
urls:
- assets/neuvector/neuvector-102.0.1+up2.4.3.tgz
version: 102.0.1+up2.4.3
- annotations:
catalog.cattle.io/auto-install: neuvector-crd=match
catalog.cattle.io/certified: rancher
@ -3478,6 +3509,26 @@ entries:
- assets/neuvector/neuvector-100.0.0+up2.2.0.tgz
version: 100.0.0+up2.2.0
neuvector-crd:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-neuvector-system
catalog.cattle.io/release-name: neuvector-crd
apiVersion: v1
appVersion: 5.1.2
created: "2023-04-14T13:51:34.829132369-07:00"
description: Helm chart for NeuVector's CRD services
digest: eac79c9aeb21c9a7de60345c0c3a432850d9391a9a40243f94bebf5b7ccc9a1b
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
maintainers:
- email: support@neuvector.com
name: becitsthere
name: neuvector-crd
type: application
urls:
- assets/neuvector-crd/neuvector-crd-102.0.1+up2.4.3.tgz
version: 102.0.1+up2.4.3
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
@ -4466,6 +4517,36 @@ entries:
- assets/rancher-alerting-drivers/rancher-alerting-drivers-1.0.100.tgz
version: 1.0.100
rancher-backup:
- annotations:
catalog.cattle.io/auto-install: rancher-backup-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Rancher Backups
catalog.cattle.io/kube-version: '>= 1.16.0-0 < 1.26.0-0'
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: resources.cattle.io.resourceset/v1
catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
catalog.cattle.io/release-name: rancher-backup
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: rancher-backup
catalog.cattle.io/upstream-version: 2.1.1
apiVersion: v2
appVersion: 3.1.1-rc2
created: "2023-04-14T12:23:20.885559-07:00"
description: Provides ability to back up and restore the Rancher application running
on any Kubernetes cluster
digest: 483dc0d1f0581761b00cfbb08fa5078f090cdbfecafcf246146aee09fa8a212f
icon: https://charts.rancher.io/assets/logos/backup-restore.svg
keywords:
- applications
- infrastructure
kubeVersion: '>= 1.16.0-0'
name: rancher-backup
urls:
- assets/rancher-backup/rancher-backup-102.0.1+up3.1.1-rc2.tgz
version: 102.0.1+up3.1.1-rc2
- annotations:
catalog.cattle.io/auto-install: rancher-backup-crd=match
catalog.cattle.io/certified: rancher
@ -4934,6 +5015,21 @@ entries:
- assets/rancher-backup/rancher-backup-1.0.200.tgz
version: 1.0.200
rancher-backup-crd:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/release-name: rancher-backup-crd
apiVersion: v2
appVersion: 3.1.1-rc2
created: "2023-04-14T12:23:22.375816-07:00"
description: Installs the CRDs for rancher-backup.
digest: 1d37fbeb3ac06bc3da3ef057d50646cc03f396d5f73175f404d04ff9782818d7
name: rancher-backup-crd
type: application
urls:
- assets/rancher-backup-crd/rancher-backup-crd-102.0.1+up3.1.1-rc2.tgz
version: 102.0.1+up3.1.1-rc2
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"

View File

@ -688,6 +688,8 @@ spec:
type: string
op:
type: string
path:
type: string
sub_criteria:
items:
properties:
@ -703,8 +705,14 @@ spec:
- value
type: object
type: array
template_kind:
type: string
type:
type: string
value:
type: string
value_type:
type: string
required:
- name
- op

View File

@ -688,6 +688,8 @@ spec:
type: string
op:
type: string
path:
type: string
sub_criteria:
items:
properties:
@ -703,8 +705,14 @@ spec:
- value
type: object
type: array
template_kind:
type: string
type:
type: string
value:
type: string
value_type:
type: string
required:
- name
- op

View File

@ -102,7 +102,7 @@ questions:
label: Runtime Path
- variable: k3s.enabled
default: "false"
description: k3s containerd runtime. Enable only one runtime
description: k3s containerd runtime. Enable only one runtime. Choose this option for RKE2 and K3S based clusters
type: boolean
label: k3s Containerd Runtime
show_subquestion_if: true
@ -116,7 +116,7 @@ questions:
#storage configurations
- variable: controller.pvc.enabled
default: false
description: If true, enable persistence for controller using PVC
description: If true, enable persistence for controller using PVC. PVC should support ReadWriteMany(RWX)
type: boolean
label: PVC Status
group: "PVC Configuration"

View File

@ -13,9 +13,9 @@
+ catalog.cattle.io/rancher-version: '>= 2.7.0-0 < 2.8.0-0'
+ catalog.cattle.io/release-name: neuvector
+ catalog.cattle.io/type: cluster-tool
+ catalog.cattle.io/upstream-version: 2.4.2
+ catalog.cattle.io/upstream-version: 2.4.3
apiVersion: v1
appVersion: 5.1.1
appVersion: 5.1.2
-description: Helm chart for NeuVector's core services
+description: Helm feature chart for NeuVector's core services
home: https://neuvector.com
@ -29,4 +29,4 @@
+name: neuvector
+sources:
+- https://github.com/neuvector/neuvector
version: 2.4.2
version: 2.4.3

View File

@ -5,25 +5,25 @@
`controller.affinity` | controller affinity rules | ... | spread controllers to different nodes |
`controller.tolerations` | List of node taints to tolerate | `nil` |
-`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](values.yaml)
+`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
`controller.disruptionbudget` | controller PodDisruptionBudget. 0 to disable. Recommended value: 2. | `0` |
`controller.priorityClassName` | controller priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` |
@@ -71,7 +71,7 @@
@@ -72,7 +72,7 @@
`controller.federation.mastersvc.ingress.ingressClassName` | To be used instead of the ingress.class annotation if an IngressClass is provisioned | `""` |
`controller.federation.mastersvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.federation.mastersvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
-`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](values.yaml)
+`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.federation.managedsvc.type` | Multi-cluster managed cluster service type. If specified, the deployment will be managed by the managed clsuter. Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` |
`controller.federation.managedsvc.annotations` | Add annotations to Multi-cluster managed cluster REST API service | `{}` |
`controller.federation.managedsvc.route.enabled` | If true, create a OpenShift route to expose the Multi-cluster managed cluster service | `false` |
@@ -87,14 +87,14 @@
@@ -88,14 +88,14 @@
`controller.federation.managedsvc.ingress.ingressClassName` | To be used instead of the ingress.class annotation if an IngressClass is provisioned | `""` |
`controller.federation.managedsvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.federation.managedsvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
-`controller.federation.managedsvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](values.yaml)
+`controller.federation.managedsvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`controller.federation.managedsvc.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.ingress.enabled` | If true, create ingress for rest api, must also set ingress host value | `false` | enable this if ingress controller is installed
`controller.ingress.tls` | If true, TLS is enabled for controller rest api ingress service |`false` | If set, the tls-host used is the one set with `controller.ingress.host`.
`controller.ingress.host` | Must set this host value if ingress is enabled | `nil` |
@ -31,47 +31,47 @@
`controller.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
`controller.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
-`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](values.yaml)
+`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`controller.configmap.enabled` | If true, configure NeuVector global settings using a ConfigMap | `false`
`controller.configmap.data` | NeuVector configuration in YAML format | `{}`
`controller.secret.enabled` | If true, configure NeuVector global settings using secrets | `false`
@@ -107,7 +107,7 @@
`enforcer.podLabels` | Specify the pod labels. | `{}` |
@@ -109,7 +109,7 @@
`enforcer.podAnnotations` | Specify the pod annotations. | `{}` |
`enforcer.env` | User-defined environment variables for enforcers. | `[]` |
`enforcer.tolerations` | List of node taints to tolerate | `- effect: NoSchedule`<br>`key: node-role.kubernetes.io/master` | other taints can be added after the default
-`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](values.yaml)
+`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.enabled` | If true, create manager | `true` |
`manager.image.repository` | manager image repository | `neuvector/manager` |
`manager.image.hash` | manager image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | |
@@ -117,7 +117,7 @@
@@ -119,7 +119,7 @@
`manager.env.ssl` | If false, manager will listen on HTTP access instead of HTTPS | `true` |
`manager.svc.type` | set manager service type for native Kubernetes | `NodePort`;<br>if it is OpenShift platform or ingress is enabled, then default is `ClusterIP` | set to LoadBalancer if using cloud providers, such as Azure, Amazon, Google
`manager.svc.loadBalancerIP` | if manager service type is LoadBalancer, this is used to specify the load balancer's IP | `nil` |
-`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](values.yaml)
+`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.route.enabled` | If true, create a OpenShift route to expose the management console service | `true` |
`manager.route.host` | Set OpenShift route host for management console service | `nil` |
`manager.route.termination` | Specify TLS termination for OpenShift route for management console service. Possible passthrough, edge, reencrypt | `passthrough` |
@@ -132,10 +132,10 @@
@@ -134,10 +134,10 @@
`manager.ingress.host` | Must set this host value if ingress is enabled | `nil` |
`manager.ingress.ingressClassName` | To be used instead of the ingress.class annotation if an IngressClass is provisioned | `""` |
`manager.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. Currently only supports `/`
-`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](values.yaml)
+`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.ingress.tls` | If true, TLS is enabled for manager ingress service |`false` | If set, the tls-host used is the one set with `manager.ingress.host`.
`manager.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually)
-`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](values.yaml)
+`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml)
+`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml)
`manager.affinity` | manager affinity rules | `{}` |
`manager.tolerations` | List of node taints to tolerate | `nil` |
`manager.nodeSelector` | Enable and specify nodeSelector labels | `{}` |
@@ -160,7 +160,7 @@
`cve.scanner.podAnnotations` | Specify the pod annotations. | `{}` |
@@ -163,7 +163,7 @@
`cve.scanner.env` | User-defined environment variables for scanner. | `[]` |
`cve.scanner.replicas` | external scanner replicas | `3` |
`cve.scanner.dockerPath` | the remote docker socket if CI/CD integration need scan images before they are pushed to the registry | `nil` |
-`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](values.yaml) |
+`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.2/charts/core/values.yaml) |
+`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/tree/2.4.3/charts/core/values.yaml) |
`cve.scanner.affinity` | scanner affinity rules | `{}` |
`cve.scanner.tolerations` | List of node taints to tolerate | `nil` |
`cve.scanner.nodeSelector` | Enable and specify nodeSelector labels | `{}` |

View File

@ -1,6 +1,6 @@
--- charts-original/templates/controller-deployment.yaml
+++ charts/templates/controller-deployment.yaml
@@ -63,19 +63,7 @@
@@ -71,19 +71,7 @@
serviceAccount: {{ .Values.serviceAccount }}
containers:
- name: neuvector-controller-pod

View File

@ -13,7 +13,7 @@
openshift: false
registry: docker.io
-tag: 5.1.1
-tag: 5.1.2
oem:
-imagePullSecrets:
-psp: false
@ -21,19 +21,19 @@
-serviceAccount: default
+serviceAccount: neuvector
controller:
# If false, controller will not be installed
@@ -22,7 +25,8 @@
internal: # enable when cert-manager is installed for the internal certificates
certmanager:
@@ -27,7 +30,8 @@
maxSurge: 1
maxUnavailable: 0
image:
- repository: neuvector/controller
+ repository: rancher/mirrored-neuvector-controller
+ tag: 5.1.1
+ tag: 5.1.2
hash:
replicas: 3
disruptionbudget: 0
@@ -70,7 +74,7 @@
@@ -75,7 +79,7 @@
# -----BEGIN PRIVATE KEY-----
# -----END PRIVATE KEY-----
ranchersso:
@ -42,27 +42,27 @@
pvc:
enabled: false
existingClaim: false
@@ -215,7 +219,8 @@
@@ -220,7 +224,8 @@
# If false, enforcer will not be installed
enabled: true
image:
- repository: neuvector/enforcer
+ repository: rancher/mirrored-neuvector-enforcer
+ tag: 5.1.1
+ tag: 5.1.2
hash:
updateStrategy:
type: RollingUpdate
@@ -245,7 +250,8 @@
@@ -251,7 +256,8 @@
# If false, manager will not be installed
enabled: true
image:
- repository: neuvector/manager
+ repository: rancher/mirrored-neuvector-manager
+ tag: 5.1.1
+ tag: 5.1.2
hash:
priorityClassName:
env:
@@ -316,7 +322,7 @@
@@ -322,7 +328,7 @@
enabled: true
secure: false
image:
@ -71,7 +71,7 @@
tag: latest
hash:
schedule: "0 0 * * *"
@@ -337,7 +343,7 @@
@@ -343,7 +349,7 @@
maxSurge: 1
maxUnavailable: 0
image:

View File

@ -1,5 +1,5 @@
url: https://neuvector.github.io/neuvector-helm/core-2.4.2.tgz
version: 102.0.0
url: https://neuvector.github.io/neuvector-helm/core-2.4.3.tgz
version: 102.0.1
additionalCharts:
- workingDir: charts-crd
crdOptions:

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: true
apiVersion: v1
appVersion: 5.1.1
appVersion: 5.1.2
description: Helm chart for NeuVector's CRD services
home: https://neuvector.com
icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4
@ -12,5 +12,5 @@ maintainers:
- email: support@neuvector.com
name: becitsthere
name: neuvector-crd
version: 2.4.2
version: 2.4.3
type: application

View File

@ -1,2 +1,2 @@
url: https://github.com/rancher/backup-restore-operator/releases/download/v3.1.0/rancher-backup-crd-3.1.0.tgz
version: 102.0.0
url: https://github.com/rancher/backup-restore-operator/releases/download/v3.1.1-rc2/rancher-backup-crd-3.1.1-rc2.tgz
version: 102.0.1

View File

@ -1,2 +1,2 @@
url: https://github.com/rancher/backup-restore-operator/releases/download/v3.1.0/rancher-backup-3.1.0.tgz
version: 102.0.0
url: https://github.com/rancher/backup-restore-operator/releases/download/v3.1.1-rc2/rancher-backup-3.1.1-rc2.tgz
version: 102.0.1

View File

@ -1,3 +1,7 @@
rancher-backup:
- 102.0.1+up3.1.1-rc2
rancher-backup-crd:
- 102.0.1+up3.1.1-rc2
rancher-eks-operator:
- 102.0.2+up1.2.0
- 102.0.1+up1.1.5
@ -6,6 +10,10 @@ rancher-eks-operator-crd:
- 102.0.2+up1.2.0
- 102.0.1+up1.1.5
- 102.0.0+up1.2.0
neuvector:
- 102.0.1+up2.4.3
neuvector-crd:
- 102.0.1+up2.4.3
rancher-istio:
- 102.2.0+up1.17.2
- 102.1.0+up1.16.3
- 102.1.0+up1.16.3