Merge pull request #698 from nflondo/main-source

Charts CI
pull/701/head
atrendafilov 2023-03-28 17:22:54 +03:00 committed by GitHub
commit e7fc80683a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
163 changed files with 3046 additions and 7775 deletions

22 binary files not shown.

View File

@@ -1,13 +1,13 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: Upgrade Argo CD to v2.6.6
- kind: fixed
description: Wrap the hostNetwork field in a conditional so it is omitted when set to false
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Argo CD
catalog.cattle.io/kube-version: '>=1.22.0-0'
catalog.cattle.io/release-name: argo-cd
apiVersion: v2
appVersion: v2.6.6
appVersion: v2.6.7
dependencies:
- condition: redis-ha.enabled
name: redis-ha
@@ -29,4 +29,4 @@ name: argo-cd
sources:
- https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
- https://github.com/argoproj/argo-cd
version: 5.27.1
version: 5.27.4

View File

@@ -312,7 +312,9 @@ spec:
path: tls.key
- key: ca.crt
path: ca.crt
{{- if .Values.controller.hostNetwork }}
hostNetwork: {{ .Values.controller.hostNetwork }}
{{- end }}
{{- with .Values.controller.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
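The same conditional wrap is applied to the repo-server and server templates in the next two hunks. A minimal values sketch of the fixed behavior (the `controller.hostNetwork` parameter is the chart's own; the rendering described follows from the template above):

```yaml
controller:
  # Before this fix the pod spec always rendered "hostNetwork: false";
  # with the conditional, a false value omits the field entirely and the
  # cluster default applies.
  hostNetwork: false
```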

View File

@@ -355,7 +355,9 @@ spec:
path: tls.key
- key: ca.crt
path: ca.crt
{{- if .Values.repoServer.hostNetwork }}
hostNetwork: {{ .Values.repoServer.hostNetwork }}
{{- end }}
{{- with .Values.repoServer.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}

View File

@@ -421,7 +421,9 @@ spec:
path: tls.crt
- key: ca.crt
path: ca.crt
{{- if .Values.server.hostNetwork }}
hostNetwork: {{ .Values.server.hostNetwork }}
{{- end }}
{{- with .Values.server.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}

View File

@@ -307,7 +307,7 @@ configs:
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9

View File

@@ -35,4 +35,4 @@ name: kafka
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/kafka
- https://kafka.apache.org/
version: 21.4.1
version: 21.4.2

View File

@@ -82,7 +82,7 @@ The command removes all the Kubernetes components associated with the chart and
| ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- |
| `image.registry` | Kafka image registry | `docker.io` |
| `image.repository` | Kafka image repository | `bitnami/kafka` |
| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.4.0-debian-11-r12` |
| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.4.0-debian-11-r13` |
| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@@ -255,7 +255,7 @@ The command removes all the Kubernetes components associated with the chart and
| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` |
| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` |
| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` |
| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.7-debian-11-r4` |
| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.8-debian-11-r1` |
| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` |
| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` |
@@ -311,7 +311,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r99` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r100` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
@@ -336,7 +336,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` |
| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` |
| `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` |
| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.6.0-debian-11-r70` |
| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.6.0-debian-11-r72` |
| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` |
| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
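As the table notes, a non-empty `image.digest` overrides the tag. A hedged values sketch pinning the Kafka image by digest (the digest value here is an illustrative placeholder, not a real image digest):

```yaml
image:
  registry: docker.io
  repository: bitnami/kafka
  tag: 3.4.0-debian-11-r13
  # Illustrative placeholder digest; when set, it takes precedence over the tag.
  digest: "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
```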

View File

@@ -77,7 +77,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/kafka
tag: 3.4.0-debian-11-r12
tag: 3.4.0-debian-11-r13
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@@ -779,7 +779,7 @@ externalAccess:
image:
registry: docker.io
repository: bitnami/kubectl
tag: 1.25.7-debian-11-r4
tag: 1.25.8-debian-11-r1
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@@ -1028,7 +1028,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r99
tag: 11-debian-11-r100
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@@ -1110,7 +1110,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/kafka-exporter
tag: 1.6.0-debian-11-r70
tag: 1.6.0-debian-11-r72
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

View File

@@ -28,4 +28,4 @@ maintainers:
name: redis
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/redis
version: 17.9.0
version: 17.9.2

View File

@@ -99,7 +99,7 @@ The command removes all the Kubernetes components associated with the chart and
| ------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------- |
| `image.registry` | Redis® image registry | `docker.io` |
| `image.repository` | Redis® image repository | `bitnami/redis` |
| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.10-debian-11-r0` |
| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.10-debian-11-r2` |
| `image.digest` | Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Redis® image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Redis® image pull secrets | `[]` |
@@ -333,11 +333,12 @@ The command removes all the Kubernetes components associated with the chart and
| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` |
| `sentinel.image.registry` | Redis® Sentinel image registry | `docker.io` |
| `sentinel.image.repository` | Redis® Sentinel image repository | `bitnami/redis-sentinel` |
| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.9-debian-11-r5` |
| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.10-debian-11-r1` |
| `sentinel.image.digest` | Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `sentinel.image.pullPolicy` | Redis® Sentinel image pull policy | `IfNotPresent` |
| `sentinel.image.pullSecrets` | Redis® Sentinel image pull secrets | `[]` |
| `sentinel.image.debug` | Enable image debug mode | `false` |
| `sentinel.annotations` | Additional custom annotations for Redis® Sentinel resource | `{}` |
| `sentinel.masterSet` | Master set name | `mymaster` |
| `sentinel.quorum` | Sentinel Quorum | `2` |
| `sentinel.getMasterTimeout` | Amount of time to allow before get_sentinel_master_info() times out. | `220` |
@@ -450,7 +451,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` |
| `metrics.image.registry` | Redis® Exporter image registry | `docker.io` |
| `metrics.image.repository` | Redis® Exporter image repository | `bitnami/redis-exporter` |
| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.48.0-debian-11-r5` |
| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.48.0-debian-11-r6` |
| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` |
@@ -515,7 +516,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r98` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r99` |
| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
@@ -525,7 +526,7 @@ The command removes all the Kubernetes components associated with the chart and
| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |
| `sysctl.image.registry` | Bitnami Shell image registry | `docker.io` |
| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r98` |
| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r99` |
| `sysctl.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |

View File

@@ -29,7 +29,7 @@ metadata:
{{- end }}
type: Opaque
data:
redis-password: {{ include "redis.password" . | b64enc | quote }}
redis-password: {{ print $password | b64enc | quote }}
{{- end -}}
{{- if .Values.serviceBindings.enabled }}
---

View File

@@ -10,8 +10,14 @@ metadata:
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- if or .Values.commonAnnotations .Values.sentinel.annotations }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.sentinel.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.annotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
replicas: {{ .Values.replica.replicaCount }}
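A hedged values sketch exercising the new merge logic (both keys exist in this chart; the annotation names and values are illustrative only):

```yaml
commonAnnotations:
  example.com/team: "data"      # illustrative; rendered on all chart resources
sentinel:
  annotations:
    example.com/managed: "true" # illustrative; merged into the annotations of the
                                # resource templated above
```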

View File

@@ -82,7 +82,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/redis
tag: 7.0.10-debian-11-r0
tag: 7.0.10-debian-11-r2
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@@ -995,7 +995,7 @@ sentinel:
image:
registry: docker.io
repository: bitnami/redis-sentinel
tag: 7.0.9-debian-11-r5
tag: 7.0.10-debian-11-r1
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@@ -1013,6 +1013,9 @@ sentinel:
## Enable debug mode
##
debug: false
## @param sentinel.annotations Additional custom annotations for Redis® Sentinel resource
##
annotations: {}
## @param sentinel.masterSet Master set name
##
masterSet: mymaster
@@ -1434,7 +1437,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/redis-exporter
tag: 1.48.0-debian-11-r5
tag: 1.48.0-debian-11-r6
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@@ -1685,7 +1688,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r98
tag: 11-debian-11-r99
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@@ -1733,7 +1736,7 @@ sysctl:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r98
tag: 11-debian-11-r99
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@@ -41,4 +41,4 @@ name: wordpress
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/wordpress
- https://wordpress.org/
version: 15.2.57
version: 15.2.58

View File

@@ -82,7 +82,7 @@ The command removes all the Kubernetes components associated with the chart and
| ------------------- | --------------------------------------------------------------------------------------------------------- | --------------------- |
| `image.registry` | WordPress image registry | `docker.io` |
| `image.repository` | WordPress image repository | `bitnami/wordpress` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.1.1-debian-11-r69` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.1.1-debian-11-r70` |
| `image.digest` | WordPress image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` |
| `image.pullSecrets` | WordPress image pull secrets | `[]` |
@@ -247,7 +247,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r99` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r100` |
| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
@@ -279,7 +279,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` |
| `metrics.image.registry` | Apache exporter image registry | `docker.io` |
| `metrics.image.repository` | Apache exporter image repository | `bitnami/apache-exporter` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.13.0-debian-11-r9` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.13.1-debian-11-r0` |
| `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` |

View File

@@ -73,7 +73,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/wordpress
tag: 6.1.1-debian-11-r69
tag: 6.1.1-debian-11-r70
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@@ -757,7 +757,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r99
tag: 11-debian-11-r100
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@@ -851,7 +851,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.13.0-debian-11-r9
tag: 0.13.1-debian-11-r0
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.21.0-0'
catalog.cattle.io/release-name: kamaji
apiVersion: v2
appVersion: v0.2.1
appVersion: v0.2.2
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
with a fraction of the operational burden. With Kamaji, you can deploy and operate
hundreds of Kubernetes clusters as a hyper-scaler.
@@ -22,4 +22,4 @@ name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.11.3
version: 0.11.4

View File

@@ -1,6 +1,6 @@
# kamaji
![Version: 0.11.3](https://img.shields.io/badge/Version-0.11.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.2.1](https://img.shields.io/badge/AppVersion-v0.2.1-informational?style=flat-square)
![Version: 0.11.4](https://img.shields.io/badge/Version-0.11.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.2.2](https://img.shields.io/badge/AppVersion-v0.2.2-informational?style=flat-square)
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.

View File

@@ -4,9 +4,15 @@ annotations:
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: cf-runtime
apiVersion: v2
appVersion: 1.9.12
description: A Helm chart for Codefresh Runner
home: https://github.com/codefresh-io/venona
icon: https://partner-charts.rancher.io/assets/logos/codefresh.jpg
keywords:
- codefresh
- runner
kubeVersion: '>=1.18-0'
maintainers:
- name: codefresh
url: https://codefresh-io.github.io/
name: cf-runtime
type: application
version: 1.9.12
version: 1.0.3

View File

@@ -1,23 +1,126 @@
# cf-runtime helm chart
To install the [Codefresh Runner](https://codefresh.io/docs/docs/administration/codefresh-runner/) using helm you need to follow these steps:
## Codefresh Runner
![Version: 1.0.3](https://img.shields.io/badge/Version-1.0.3-informational?style=flat-square)
## Prerequisites
- Kubernetes 1.19+
- Helm 3.8.0+
## Get Repo Info
```console
helm repo add cf-runtime http://chartmuseum.codefresh.io/cf-runtime
helm repo update
```
## Install Chart
**Important:** only helm3 is supported
1. Download the Codefresh CLI and authenticate it with your Codefresh account. Click [here](https://codefresh-io.github.io/cli/getting-started/) for more detailed instructions.
2. Run the following command to create all of the necessary enitites in Codefresh:
```
2. Run the following command to create mandatory values for Codefresh Runner:
```console
codefresh runner init --generate-helm-values-file
```
* This will not install anything on your cluster, except for running cluster acceptance tests, which may be skipped using the `--skip-cluster-test` option).
* This will not install anything on your cluster, except for running cluster acceptance tests, which may be skipped using the `--skip-cluster-test` option.
* This command will also generate a `generated_values.yaml` file in your current directory, which you will need to provide to the `helm install` command later.
3. Now run the following to complete the installation:
3. Now run the following to complete the installation:
```
```console
helm repo add cf-runtime https://chartmuseum.codefresh.io/cf-runtime
helm install cf-runtime cf-runtime/cf-runtime -f ./generated_values.yaml --create-namespace --namespace codefresh
```
4. At this point you should have a working Codefresh Runner. You can verify the installation by running:
helm upgrade --install cf-runtime cf-runtime/cf-runtime -f ./generated_values.yaml --create-namespace --namespace codefresh
```
4. At this point you should have a working Codefresh Runner. You can verify the installation by running:
```console
codefresh runner execute-test-pipeline --runtime-name <runtime-name>
```
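For orientation only: `generated_values.yaml` carries the `global` parameters listed in the Values table below. A sketch of its shape (field names are from this chart's values.yaml; every value is a placeholder):

```yaml
global:
  namespace: codefresh
  codefreshHost: <codefresh-api-host>       # placeholder
  agentToken: <token issued by runner init> # placeholder
  agentId: <agent-id>
  agentName: <agent-name>
  accountId: <account-id>
  runtimeName: <runtime-name>
  keys:
    key: <client key>
    csr: <certificate signing request>
    ca: <ca certificate>
    serverCert: <server certificate>
```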
## Requirements
Kubernetes: `>=1.19.0-0`
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| appProxy.enabled | bool | `false` | Enable app-proxy |
| appProxy.env | object | `{}` | |
| appProxy.image | string | `"codefresh/cf-app-proxy:latest"` | Set app-proxy image |
| appProxy.ingress.annotations | object | `{}` | Set extra annotations for ingress object |
| appProxy.ingress.class | string | `""` | Set ingress class |
| appProxy.ingress.host | string | `""` | Set DNS hostname the ingress will use |
| appProxy.ingress.pathPrefix | string | `"/"` | Set path prefix for ingress |
| appProxy.ingress.tlsSecret | string | `""` | Set k8s tls secret for the ingress object |
| appProxy.resources | object | `{}` | |
| appProxy.serviceAccount.annotations | object | `{}` | |
| dockerRegistry | string | `"quay.io"` | Set docker registry prefix for the runtime images |
| global.accountId | string | `""` | |
| global.agentId | string | `""` | |
| global.agentName | string | `""` | |
| global.agentToken | string | `""` | |
| global.codefreshHost | string | `""` | |
| global.existingAgentToken | string | `""` | Existing secret (name-of-existing-secret) with API token from Codefresh (supersedes value for global.agentToken; secret must contain `codefresh.token` key) |
| global.existingDindCertsSecret | string | `""` | Existing secret (name has to be `codefresh-certs-server`) (supersedes value for global.keys; secret must contain `server-cert.pem` `server-key.pem` and `ca.pem`` keys) |
| global.keys.ca | string | `""` | |
| global.keys.key | string | `""` | |
| global.keys.serverCert | string | `""` | |
| global.namespace | string | `"codefresh"` | |
| global.runtimeName | string | `""` | |
| monitor.clusterId | string | `""` | Cluster name as it registered in account |
| monitor.enabled | bool | `false` | Enable monitor Ref: https://codefresh.io/docs/docs/installation/codefresh-runner/#install-monitoring-component |
| monitor.env | object | `{}` | |
| monitor.existingMonitorToken | string | `""` | Set Existing secret (name-of-existing-secret) with API token from Codefresh (supersedes value of monitor.token; secret must contain `codefresh.token` key) |
| monitor.helm3 | bool | `true` | keep true as default! |
| monitor.image | string | `"codefresh/agent:stable"` | Set monitor image |
| monitor.resources | object | `{}` | |
| monitor.serviceAccount.annotations | object | `{}` | |
| monitor.token | string | `""` | API token from Codefresh |
| monitor.useNamespaceWideRole | bool | `false` | Use ClusterRole (`false`) or Role (`true`) RBAC |
| re.dindDaemon.experimental | bool | `true` | |
| re.dindDaemon.hosts[0] | string | `"unix:///var/run/docker.sock"` | |
| re.dindDaemon.hosts[1] | string | `"tcp://0.0.0.0:1300"` | |
| re.dindDaemon.insecure-registries[0] | string | `"192.168.99.100:5000"` | |
| re.dindDaemon.metrics-addr | string | `"0.0.0.0:9323"` | |
| re.dindDaemon.tls | bool | `true` | |
| re.dindDaemon.tlscacert | string | `"/etc/ssl/cf-client/ca.pem"` | |
| re.dindDaemon.tlscert | string | `"/etc/ssl/cf/server-cert.pem"` | |
| re.dindDaemon.tlskey | string | `"/etc/ssl/cf/server-key.pem"` | |
| re.dindDaemon.tlsverify | bool | `true` | |
| re.serviceAccount | object | `{"annotations":{}}` | Set annotation on engine Service Account Ref: https://codefresh.io/docs/docs/administration/codefresh-runner/#injecting-aws-arn-roles-into-the-cluster |
| runner.env | object | `{}` | Add additional env vars |
| runner.image | string | `"codefresh/venona:1.9.13"` | Set runner image |
| runner.nodeSelector | object | `{}` | Set runner node selector |
| runner.resources | object | `{}` | Set runner requests and limits |
| runner.tolerations | list | `[]` | Set runner tolerations |
| storage.azuredisk.cachingMode | string | `"None"` | |
| storage.azuredisk.skuName | string | `"Premium_LRS"` | Set storage type (`Premium_LRS`) |
| storage.backend | string | `"local"` | Set backend volume type (`local`/`ebs`/`ebs-csi`/`gcedisk`/`azuredisk`) |
| storage.ebs.accessKeyId | string | `""` | Set AWS_ACCESS_KEY_ID for volume-provisioner (optional) Ref: https://codefresh.io/docs/docs/installation/codefresh-runner/#dind-volume-provisioner-permissions |
| storage.ebs.availabilityZone | string | `"us-east-1a"` | Set EBS volumes availability zone (required) |
| storage.ebs.encrypted | string | `"false"` | Enable encryption (optional) |
| storage.ebs.kmsKeyId | string | `""` | Set KMS encryption key ID (optional) |
| storage.ebs.secretAccessKey | string | `""` | Set AWS_SECRET_ACCESS_KEY for volume-provisioner (optional) Ref: https://codefresh.io/docs/docs/installation/codefresh-runner/#dind-volume-provisioner-permissions |
| storage.ebs.volumeType | string | `"gp2"` | Set EBS volume type (`gp2`/`gp3`/`io1`) (required) |
| storage.fsType | string | `"ext4"` | Set filesystem type (`ext4`/`xfs`) |
| storage.gcedisk.availabilityZone | string | `"us-west1-a"` | Set GCP volume availability zone |
| storage.gcedisk.serviceAccountJson | string | `""` | Set Google SA JSON key for volume-provisioner (optional) |
| storage.gcedisk.volumeType | string | `"pd-ssd"` | Set GCP volume backend type (`pd-ssd`/`pd-standard`) |
| storage.local.volumeParentDir | string | `"/var/lib/codefresh/dind-volumes"` | Set volume path on the host filesystem |
| storage.localVolumeMonitor.env | object | `{}` | |
| storage.localVolumeMonitor.image | string | `"codefresh/dind-volume-utils:1.29.3"` | Set `dind-lv-monitor` image |
| storage.localVolumeMonitor.nodeSelector | object | `{}` | |
| storage.localVolumeMonitor.resources | object | `{}` | |
| storage.localVolumeMonitor.tolerations | list | `[]` | |
| volumeProvisioner.annotations | object | `{}` | |
| volumeProvisioner.env | object | `{}` | Add additional env vars |
| volumeProvisioner.image | string | `"codefresh/dind-volume-provisioner:1.33.2"` | Set volume-provisioner image |
| volumeProvisioner.nodeSelector | object | `{}` | Set volume-provisioner node selector |
| volumeProvisioner.resources | object | `{}` | Set volume-provisioner requests and limits |
| volumeProvisioner.securityContext | object | `{"enabled":true}` | Enable volume-provisioner pod's security context (running as non root user) |
| volumeProvisioner.serviceAccount | object | `{}` | Set annotation on volume-provisioner Service Account |
| volumeProvisioner.tolerations | list | `[]` | Set volume-provisioner tolerations |
| volumeProvisioner.volume-cleanup.image | string | `"codefresh/dind-volume-cleanup:1.2.0"` | Set `dind-volume-cleanup` image |
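A hedged overlay example on top of `generated_values.yaml`, using only keys from the table above (all values illustrative); pass it as an additional `-f custom-values.yaml` to the `helm upgrade --install` command from step 3:

```yaml
# custom-values.yaml (illustrative)
dockerRegistry: "myregistry.local"  # pull runtime images through a private registry
runner:
  resources:
    limits:
      cpu: 400m
      memory: 1Gi
storage:
  backend: ebs
  fsType: ext4
  ebs:
    volumeType: gp3
    availabilityZone: us-east-1c
```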

View File

@@ -0,0 +1,44 @@
## Codefresh Runner
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
## Prerequisites
- Kubernetes 1.19+
- Helm 3.8.0+
## Get Repo Info
```console
helm repo add cf-runtime http://chartmuseum.codefresh.io/cf-runtime
helm repo update
```
## Install Chart
**Important:** only helm3 is supported
1. Download the Codefresh CLI and authenticate it with your Codefresh account. Click [here](https://codefresh-io.github.io/cli/getting-started/) for more detailed instructions.
2. Run the following command to create mandatory values for Codefresh Runner:
```console
codefresh runner init --generate-helm-values-file
```
* This will not install anything on your cluster, except for running cluster acceptance tests, which may be skipped using the `--skip-cluster-test` option.
* This command will also generate a `generated_values.yaml` file in your current directory, which you will need to provide to the `helm install` command later.
3. Now run the following to complete the installation:
```console
helm repo add cf-runtime https://chartmuseum.codefresh.io/cf-runtime
helm upgrade --install cf-runtime cf-runtime/cf-runtime -f ./generated_values.yaml --create-namespace --namespace codefresh
```
4. At this point you should have a working Codefresh Runner. You can verify the installation by running:
```console
codefresh runner execute-test-pipeline --runtime-name <runtime-name>
```
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}

View File

@@ -68,8 +68,9 @@ codefresh.io/application: pv-cleanup
{{- define "cf-vp.docker-image-volume-utils" -}}
{{- if ne .Values.dockerRegistry ""}}
{{- .Values.dockerRegistry }}/codefresh/dind-volume-utils:1.29.2
{{- else }}codefresh/dind-volume-utils:1.29.2
{{- .Values.dockerRegistry }}/{{ .Values.storage.localVolumeMonitor.image }}
{{- else }}
{{- index .Values.storage.localVolumeMonitor.image }}
{{- end}}
{{- end }}
@@ -83,7 +84,8 @@ codefresh.io/application: pv-cleanup
{{- define "cf-vp.docker-image-cleanup-cron" -}}
{{- if ne .Values.dockerRegistry ""}}
{{- .Values.dockerRegistry }}/codefresh/dind-volume-cleanup:1.2.0
{{- else }}codefresh/dind-volume-cleanup:1.2.0
{{- .Values.dockerRegistry }}/{{ index .Values "volumeProvisioner" "volume-cleanup" "image" }}
{{- else }}
{{- index .Values "volumeProvisioner" "volume-cleanup" "image" }}
{{- end}}
{{- end }}
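The effect of the rewritten helpers, sketched with the chart defaults (the rendered strings are inferred from the template logic above):

```yaml
# values (chart defaults):
dockerRegistry: "quay.io"
storage:
  localVolumeMonitor:
    image: codefresh/dind-volume-utils:1.29.3

# "cf-vp.docker-image-volume-utils" then renders:
#   quay.io/codefresh/dind-volume-utils:1.29.3
# and with dockerRegistry set to "", it renders the bare image:
#   codefresh/dind-volume-utils:1.29.3
```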

View File

@@ -27,13 +27,28 @@ spec:
{{- if .Values.storage.localVolumeMonitor.tolerations }}
{{ toYaml .Values.storage.localVolumeMonitor.tolerations | indent 8 }}
{{- end }}
initContainers:
- command:
- chown
- -R
- 1000:1000
- /var/lib/codefresh/dind-volumes
image: alpine
imagePullPolicy: Always
name: fs-change-owner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/lib/codefresh/dind-volumes
name: dind-volume-dir
containers:
- image: {{ include "cf-vp.docker-image-volume-utils" . }}
name: lv-cleaner
imagePullPolicy: Always
resources: {{ toYaml .Values.storage.localVolumeMonitor.resources | nindent 12 }}
command:
- /bin/local-volumes-agent
- /home/dind-volume-utils/bin/local-volumes-agent
env:
{{- if .Values.storage.localVolumeMonitor.env }}
{{- range $key, $value := .Values.storage.localVolumeMonitor.env }}
@@ -51,6 +66,8 @@ spec:
- mountPath: {{ $localVolumeParentDir }}
readOnly: false
name: dind-volume-dir
securityContext:
fsGroup: 1000
volumes:
- name: dind-volume-dir
hostPath:

View File

@@ -1,38 +1,44 @@
# global values are in generated_values.yaml
# run `codefresh runner init --generate-helm-values-file` first
# Global parameters.
# Global values are in generated_values.yaml. Run `codefresh runner init --generate-helm-values-file`!
# @default -- See below
global:
namespace: ""
namespace: "codefresh"
codefreshHost: ""
agentToken: ""
# Existing secret (name-of-existing-secret) with API token from Codefresh (supersedes value for global.agentToken; secret must contain `codefresh.token` key)
# -- Existing secret (name-of-existing-secret) with API token from Codefresh (supersedes value for global.agentToken; secret must contain `codefresh.token` key)
existingAgentToken: ""
agentId: ""
agentName: ""
accountId: ""
runtimeName: ""
# Existing secret (name has to be `codefresh-certs-server`) (supersedes value for global.keys; secret must contain `server-cert.pem` `server-key.pem` and `ca.pem`` keys)
# -- Existing secret (name has to be `codefresh-certs-server`) (supersedes value for global.keys; secret must contain `server-cert.pem`, `server-key.pem` and `ca.pem` keys)
existingDindCertsSecret: ""
keys:
key: ""
csr: ""
ca: ""
serverCert: ""
dockerRegistry: "quay.io" # Registry prefix for the runtime images (default quay.io)
## e.g:
# dockerRegistry: "docker.io"
newRelicLicense: "" # NEWRELIC_LICENSE_KEY (for app-proxy and runner deployments)
# -- Set docker registry prefix for the runtime images
dockerRegistry: "quay.io"
# E.g
# dockerRegistry: "myreqistry.local"
runner: # Runner Deployment
image: "codefresh/venona:1.9.12"
# Runner parameters
# @default -- See below
runner:
# -- Set runner image
image: "codefresh/venona:1.9.13"
# -- Add additional env vars
env: {}
## e.g:
# E.g
# env:
# HTTP_PROXY: 10.20.0.35:8080
# HTTPS_PROXY: 10.20.0.35:8080
# NO_PROXY: 10.20.0.*
# -- Set runner requests and limits
resources: {}
## e.g:
# E.g
# resources:
# limits:
# cpu: 400m
@@ -40,65 +46,105 @@ runner: # Runner Deployment
# requests:
# cpu: 200m
# memory: 500Mi
# -- Set runner node selector
nodeSelector: {}
## e.g:
# E.g
# nodeSelector:
# foo: bar
# -- Set runner tolerations
tolerations: []
## e.g:
## E.g
# tolerations:
# - key: codefresh
# operator: Equal
# value: dind
# effect: NoSchedule
volumeProvisioner: # Volume-Provisioner Deployment
image: "codefresh/dind-volume-provisioner:1.33.1"
serviceAccount: {} # annotate volume-provisioner service account
## e.g:
# Volume Provisioner parameters
# @default -- See below
volumeProvisioner:
# -- Set volume-provisioner image
image: "codefresh/dind-volume-provisioner:1.33.2"
# -- Set annotation on volume-provisioner Service Account
serviceAccount: {}
# E.g
# serviceAccount:
# annotations:
# eks.amazonaws.com/role-arn: "arn:aws:iam::<ACCOUNT_ID>:role/<IAM_ROLE_NAME>"
# -- Set volume-provisioner node selector
nodeSelector: {}
# -- Set volume-provisioner requests and limits
resources: {}
# -- Set volume-provisioner tolerations
tolerations: []
# Running as non root user is supported since version 1.32.0
# -- Enable volume-provisioner pod's security context (running as non root user)
securityContext:
enabled: true
# -- Add additional env vars
env: {}
## e.g:
# E.g
# env:
# PRIVILEGED_CONTAINER: true
### https://codefresh.io/docs/docs/administration/codefresh-runner/#installing-on-aks
# mountAzureJson: true
annotations: {} # annotate volume-provisioner pod
storage: # Storage parameters for Volume-Provisioner
backend: local # volume type: local(default), ebs, gcedisk or azuredisk
fsType: "ext4" # filesystem type: ext4(default) or xfs
# `dind-volume-cleanup` CronJob parameters
# @default -- See below
volume-cleanup:
# -- Set `dind-volume-cleanup` image
image: codefresh/dind-volume-cleanup:1.2.0
# Storage example for local volumes on the K8S nodes filesystem
# Storage parameters for volume-provisioner
# @default -- See below
storage:
# -- Set backend volume type (`local`/`ebs`/`ebs-csi`/`gcedisk`/`azuredisk`)
backend: local
# -- Set filesystem type (`ext4`/`xfs`)
fsType: "ext4"
# Storage parameters example for local volumes on the K8S nodes filesystem (i.e. `storage.backend=local`)
# https://kubernetes.io/docs/concepts/storage/volumes/#local
# @default -- See below
local:
# -- Set volume path on the host filesystem
volumeParentDir: /var/lib/codefresh/dind-volumes
localVolumeMonitor: # lv-monitor DaemonSet (only for `storage.backend: local`)
# `dind-lv-monitor` DaemonSet parameters (deployed only when `storage.backend=local`)
# @default -- See below
localVolumeMonitor:
# -- Set `dind-lv-monitor` image
image: codefresh/dind-volume-utils:1.29.3
nodeSelector: {}
resources: {}
tolerations: []
env: {}
# Storage example for aws ebs disks
# Storage parameters example for aws ebs disks (i.e. `storage.backend=ebs`/`storage.backend=ebs-csi`)
# https://aws.amazon.com/ebs/
# https://codefresh.io/docs/docs/administration/codefresh-runner/#installing-on-aws
# @default -- See below
ebs:
volumeType: "" # gp2(default), gp3 or io1
availabilityZone: "" # valid aws zone
encrypted: "" # encrypt volume (false by default)
kmsKeyId: "" # (Optional) KMS Key ID
accessKeyId: "" # (Optional) AWS_ACCESS_KEY_ID
secretAccessKey: "" # (Optional) AWS_SECRET_ACCESS_KEY
## e.g:
# -- Set EBS volume type (`gp2`/`gp3`/`io1`) (required)
volumeType: "gp2"
# -- Set EBS volumes availability zone (required)
availabilityZone: "us-east-1a"
# -- Enable encryption (optional)
encrypted: "false"
# -- Set KMS encryption key ID (optional)
kmsKeyId: ""
# -- Set AWS_ACCESS_KEY_ID for volume-provisioner (optional)
# Ref: https://codefresh.io/docs/docs/installation/codefresh-runner/#dind-volume-provisioner-permissions
accessKeyId: ""
# -- Set AWS_SECRET_ACCESS_KEY for volume-provisioner (optional)
# Ref: https://codefresh.io/docs/docs/installation/codefresh-runner/#dind-volume-provisioner-permissions
secretAccessKey: ""
# E.g:
# ebs:
# volumeType: gp3
# availabilityZone: us-east-1c
@@ -116,16 +162,20 @@ storage: # Storage parameters for Volume-Provisioner
# availabilityZone: us-east-1c
# encrypted: true
# kmsKeyId: "1234abcd-12ab-34cd-56ef-1234567890ab"
# accessKeyId: "AKIAIOSFODNN7EXAMPLE"
# secretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
# accessKeyId: "MYKEYID"
# secretAccessKey: "MYACCESSKEY"
# Storage example for gce disks
# Storage parameters example for gce disks
# https://cloud.google.com/compute/docs/disks#pdspecs
# https://codefresh.io/docs/docs/administration/codefresh-runner/#installing-on-google-kubernetes-engine
# @default -- See below
gcedisk:
volumeType: "" # pd-ssd(default) or pd-standard
availabilityZone: "" # valid gcp zone
serviceAccountJson: "" # (Optional) Google SA JSON key
# -- Set GCP volume backend type (`pd-ssd`/`pd-standard`)
volumeType: "pd-ssd"
# -- Set GCP volume availability zone
availabilityZone: "us-west1-a"
# -- Set Google SA JSON key for volume-provisioner (optional)
serviceAccountJson: ""
## e.g:
# gcedisk:
# volumeType: pd-ssd
@@ -144,29 +194,36 @@ storage: # Storage parameters for Volume-Provisioner
# "client_x509_cert_url": "..."
# }
# Storage example for Azure Disks
# Storage parameters example for Azure Disks
# https://codefresh.io/docs/docs/administration/codefresh-runner/#installing-on-aks
# @default -- See below
azuredisk:
skuName: Premium_LRS #default
# -- Set storage type (`Premium_LRS`)
skuName: Premium_LRS
cachingMode: None
# location: westcentralus
# resourceGroup:
# DiskIOPSReadWrite: 500
# DiskMBpsReadWrite: 100
# Set runtime parameters
# @default -- See below
re:
# Optionally add an AWS IAM role to your pipelines
# More info: https://codefresh.io/docs/docs/administration/codefresh-runner/#injecting-aws-arn-roles-into-the-cluster
## e.g:
# re:
# -- Set annotation on engine Service Account
# Ref: https://codefresh.io/docs/docs/administration/codefresh-runner/#injecting-aws-arn-roles-into-the-cluster
serviceAccount:
annotations: {}
# E.g
# serviceAccount:
# annotations: # will be set on codefresh-engine service account
# annotations:
# eks.amazonaws.com/role-arn: "arn:aws:iam::<ACCOUNT_ID>:role/<IAM_ROLE_NAME>"
dindDaemon: # dind daemon config
# DinD pod daemon config
# @default -- See below
dindDaemon:
hosts:
- unix:///var/run/docker.sock
- tcp://0.0.0.0:1300
storage-driver: overlay2
tlsverify: true
tls: true
tlscacert: /etc/ssl/cf-client/ca.pem
@@ -177,19 +234,28 @@ re:
metrics-addr: 0.0.0.0:9323
experimental: true
appProxy: # App-Proxy Deployment
# App-Proxy parameters
# Ref: https://codefresh.io/docs/docs/installation/codefresh-runner/#app-proxy-installation
# @default -- See below
appProxy:
# -- Enable app-proxy
enabled: false
# -- Set app-proxy image
image: "codefresh/cf-app-proxy:latest"
env: {}
## e.g:
# env:
# LOG_LEVEL: debug
# Set app-proxy ingress parameters
# @default -- See below
ingress:
pathPrefix: "" # Specify path prefix for ingress (default is '/')
class: "" # Specify ingress class
host: "" # Specify DNS hostname the ingress will use
tlsSecret: "" # Specify k8s tls secret for the ingress object
annotations: {} # Specify extra annotations for ingress object
# -- Set path prefix for ingress
pathPrefix: "/"
# -- Set ingress class
class: ""
# -- Set DNS hostname the ingress will use
host: ""
# -- Set k8s tls secret for the ingress object
tlsSecret: ""
# -- Set extra annotations for ingress object
annotations: {}
## e.g:
# ingress:
# pathPrefix: "/app-proxy"
@@ -198,18 +264,29 @@ appProxy: # App-Proxy Deployment
# tlsSecret: "tls-cert-app-proxy"
# annotations:
# nginx.ingress.kubernetes.io/whitelist-source-range: 123.123.123.123/130
resources: {}
serviceAccount:
annotations: {}
monitor: # Monitor Deployment
enabled: false
image: "codefresh/agent:stable"
helm3: true
useNamespaceWideRole: false # Use ClusterRole(false) or Role(true)
clusterId: "" # Cluster name as it registered in account
token: "" # API token from Codefresh
existingMonitorToken: "" # Existing secret (name-of-existing-secret) with API token from Codefresh (supersedes value of monitor.token; secret must contain `codefresh.token` key)
env: {}
resources: {}
serviceAccount:
annotations: {}
# Monitor parameters
# @default -- See below
monitor:
# -- Enable monitor
# Ref: https://codefresh.io/docs/docs/installation/codefresh-runner/#install-monitoring-component
enabled: false
# -- Set monitor image
image: "codefresh/agent:stable"
# -- keep true as default!
helm3: true
# -- Use ClusterRole (`false`) or Role (`true`) RBAC
useNamespaceWideRole: false
# -- Cluster name as it registered in account
clusterId: ""
# -- API token from Codefresh
token: ""
# -- Set Existing secret (name-of-existing-secret) with API token from Codefresh (supersedes value of monitor.token; secret must contain `codefresh.token` key)
existingMonitorToken: ""
env: {}
resources: {}
serviceAccount:

View File

@@ -1,5 +1,11 @@
# Changelog
## 0.10.0
* Add ability to use the conversion webhook
* Add dependency on the cert manager to manage the certificates of the conversion webhook
* Note that the option to enable the various CRDs has changed from `datadog-crds` to `datadogCRDs`.
## 0.9.2
* Updating CRD dependency to DatadogMonitors and DatadogAgent.
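For the rename called out in 0.10.0, a before/after values sketch (keys as listed in the chart README; only the alias changes, not the toggles themselves):

```yaml
# chart 0.9.x:
# datadog-crds:
#   crds:
#     datadogAgents: true

# chart 0.10.0:
datadogCRDs:
  crds:
    datadogAgents: true
    datadogMetrics: true
    datadogMonitors: true
```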

View File

@@ -1,6 +1,6 @@
dependencies:
- name: datadog-crds
repository: https://helm.datadoghq.com
version: 0.5.9
digest: sha256:c40a5810badfd08a8a278a83c8574ece0fd5995f50ff35faaf72f7e5e90bfcbd
generated: "2022-12-08T14:52:07.581272-05:00"
version: 0.6.1
digest: sha256:bbebf7e0049b5ebaeb6b6828ec3926965f69ec62b53f3524591f453f098136ed
generated: "2023-03-27T17:13:21.699269-04:00"

View File

@@ -3,14 +3,15 @@ annotations:
catalog.cattle.io/display-name: Datadog Operator
catalog.cattle.io/release-name: datadog-operator
apiVersion: v2
appVersion: 0.8.4
appVersion: 1.0.0
dependencies:
- condition: installCRDs
- alias: datadogCRDs
condition: installCRDs
name: datadog-crds
repository: file://./charts/datadog-crds
tags:
- install-crds
version: =0.5.9
version: =0.6.1
description: Datadog Operator
home: https://www.datadoghq.com
icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png
@@ -25,4 +26,4 @@ name: datadog-operator
sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-agent
version: 0.9.2
version: 0.10.0

View File

@@ -1,6 +1,6 @@
# Datadog Operator
![Version: 0.9.2](https://img.shields.io/badge/Version-0.9.2-informational?style=flat-square) ![AppVersion: 0.8.4](https://img.shields.io/badge/AppVersion-0.8.4-informational?style=flat-square)
![Version: 0.10.0](https://img.shields.io/badge/Version-0.10.0-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square)
## Values
@@ -13,10 +13,14 @@
| appKeyExistingSecret | string | `nil` | Use existing Secret which stores APP key instead of creating a new one |
| collectOperatorMetrics | bool | `true` | Configures an openmetrics check to collect operator metrics |
| containerSecurityContext | object | `{}` | A security context defines privileges and access control settings for a container. |
| datadog-crds.crds.datadogAgents | bool | `true` | Set to true to deploy the DatadogAgents CRD |
| datadog-crds.crds.datadogMetrics | bool | `true` | Set to true to deploy the DatadogMetrics CRD |
| datadog-crds.crds.datadogMonitors | bool | `true` | Set to true to deploy the DatadogMonitors CRD |
| datadog-crds.migration.datadogAgents.version | string | `"v1alpha1"` | |
| datadogCRDs.crds.datadogAgents | bool | `true` | |
| datadogCRDs.crds.datadogMetrics | bool | `true` | |
| datadogCRDs.crds.datadogMonitors | bool | `true` | |
| datadogCRDs.migration.datadogAgents.conversionWebhook.enabled | bool | `false` | |
| datadogCRDs.migration.datadogAgents.conversionWebhook.name | string | `"datadog-operator-webhook-service"` | |
| datadogCRDs.migration.datadogAgents.conversionWebhook.namespace | string | `"default"` | |
| datadogCRDs.migration.datadogAgents.useCertManager | bool | `false` | |
| datadogCRDs.migration.datadogAgents.version | string | `"v1alpha1"` | |
| datadogMonitor.enabled | bool | `false` | Enables the Datadog Monitor controller |
| dd_url | string | `nil` | The host of the Datadog intake server to send Agent data to, only set this option if you need the Agent to send data to a custom URL |
| env | list | `[]` | Define any environment variables to be passed to the operator. |
@@ -44,7 +48,6 @@
| supportExtendedDaemonset | string | `"false"` | If true, supports using ExtendedDeamonSet CRD |
| tolerations | list | `[]` | Allows to schedule Datadog Operator on tainted nodes |
| watchNamespaces | list | `[]` | Restrics the Operator to watch its managed resources on specific namespaces |
| webhook | object | `{"conversion":{"enabled":false}}` | configure webhook servers |
## How to configure which namespaces are watched by the Operator.
@@ -64,3 +67,181 @@ To watch all namespaces, the following configuration needs to be used:
watchNamespaces:
- ""
```
## Migrating to the version 1.0 of the Datadog Operator
### Disclaimer
As part of the General Availability release of the Datadog Operator, we are offering a migration path for our early adopters to migrate to the GA version of the custom resource, `v2alpha1/DatadogAgent`.
The Datadog Operator v1.X reconciles the version `v2alpha1` of the DatadogAgent custom resource, while the v0.X reconciles `v1alpha1`.
In the following documentation, you will find mentions of the image with a `rc` (release candidate) tag. We will update it to the official `1.0.0` tag upon releasing.
Consider the following steps with the same maturity (beta) level as the project.
### Requirements
If you are using the v1alpha1 with a v0.X version of the Datadog Operator and would like to upgrade, you will need to use the Conversion Webhook feature.
Start by ensuring that you have the minimum required version of the chart and its dependencies:
```
NAME                   CHART VERSION   APP VERSION   DESCRIPTION
datadog/datadog-crds   0.6.1           1             Datadog Kubernetes CRDs chart
```
and for the Datadog Operator chart:
```
NAME                       CHART VERSION   APP VERSION   DESCRIPTION
datadog/datadog-operator   0.10.0          1.0.0         Datadog Operator
```
Then, if you don't already have cert-manager installed, add its chart repository:
```
helm repo add jetstack https://charts.jetstack.io
```
and then install it:
```
helm install \
cert-manager jetstack/cert-manager \
--version v1.11.0 \
--set installCRDs=true
```
### Migration
You can update with the following:
```
helm upgrade \
datadog-operator datadog/datadog-operator \
--set image.tag=1.0.0-rc.12 \
--set datadogCRDs.migration.datadogAgents.version=v2alpha1 \
--set datadogCRDs.migration.datadogAgents.useCertManager=true \
--set datadogCRDs.migration.datadogAgents.conversionWebhook.enabled=true
```
### Implementation details
This creates a self-signed `Certificate` (using an `Issuer`) that cert-manager uses to patch the DatadogAgent CRD with the `caBundle` the API server will use to contact the conversion webhook.
The Datadog Operator will run the new reconciler for `v2alpha1` objects and will also start a conversion webhook server, exposed on port 9443. This is the server the API server uses to convert v1alpha1 DatadogAgent objects into v2alpha1.
### Lifecycle
The conversion webhook is not meant to run indefinitely; we recommend using it only as a transitional step to migrate your objects.
Once converted, you can store the new version of your DatadogAgent, deactivate the conversion and simply deploy v2alpha1 objects.
### Roadmap
Upon releasing the v2 version of the DatadogAgent object, we will remove v1alpha1 from the CRD as part of a major update of the charts (datadog-crds and datadog-operator).
### Troubleshooting
* I don't see the v2alpha1 version of the DatadogAgent resource
Both v1alpha1 and v2alpha1 are `served`, so you might need to specify which version you want to see:
```
kubectl get datadogagents.v2alpha1.datadoghq.com datadog-agent
```
* The Conversion is not working
The logs of the Datadog Operator pod should show that the conversion webhook is enabled, the server is running, and the certificates are watched.
```
kubectl logs datadog-operator-XXX-YYY
[...]
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.webhook","msg":"Registering webhook","path":"/convert"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.builder","msg":"Conversion webhook enabled","GVK":"datadoghq.com/v2alpha1, Kind=DatadogAgent"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"setup","msg":"starting manager"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.webhook.webhooks","msg":"Starting webhook server"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.certwatcher","msg":"Updated current TLS certificate"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.webhook","msg":"Serving webhook server","host":"","port":9443}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","msg":"Starting server","path":"/metrics","kind":"metrics","addr":"0.0.0.0:8383"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","msg":"Starting server","kind":"health probe","addr":"0.0.0.0:8081"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.certwatcher","msg":"Starting certificate watcher"}
[...]
```
* Check that the Service registered for the conversion has an Endpoint
```
kubectl describe service datadog-operator-webhook-service
[...]
Name: datadog-operator-webhook-service
Namespace: default
[...]
Selector: app.kubernetes.io/instance=datadog-operator,app.kubernetes.io/name=datadog-operator
[...]
Port: <unset> 443/TCP
TargetPort: 9443/TCP
Endpoints: 10.88.3.28:9443
```
* Verify the registered service for the conversion webhook
```
kubectl describe crd datadogagents.datadoghq.com
[...]
Conversion:
Strategy: Webhook
Webhook:
Client Config:
Ca Bundle: LS0t[...]UtLS0tLQo=
Service:
Name: datadog-operator-webhook-service
Namespace: default
Path: /convert
Port: 443
Conversion Review Versions:
v1
```
* The CRD does not have the `caBundle`
Make sure that the CRD has the correct annotation: `cert-manager.io/inject-ca-from: default/datadog-operator-serving-cert` and check the logs of the `cert-manager-cainjector` pod.
If you do not see anything standing out, setting the log level to 5 (debug) might help:
```
kubectl edit deploy cert-manager-cainjector -n cert-manager
[...]
spec:
containers:
- args:
- --v=5
[...]
```
You should see logs such as:
```
[...]
I0217 08:11:15.582479 1 controller.go:178] cert-manager/certificate/customresourcedefinition/generic-inject-reconciler "msg"="updated object" "resource_kind"="CustomResourceDefinition" "resource_name"="datadogagents.datadoghq.com" "resource_namespace"="" "resource_version"="v1"
I0217 08:25:24.989209 1 sources.go:98] cert-manager/certificate/customresourcedefinition/generic-inject-reconciler "msg"="Extracting CA from Certificate resource" "certificate"="default/datadog-operator-serving-cert" "resource_kind"="CustomResourceDefinition" "resource_name"="datadogagents.datadoghq.com" "resource_namespace"="" "resource_version"="v1"
[...]
```
### Rollback
If you migrated to the new version of the Datadog Operator using v2alpha1 but want to roll back to the former version, we recommend:
- Scaling the Datadog Operator deployment to 0 replicas.
```
kubectl scale deploy datadog-operator --replicas=0
```
- Upgrading the chart so that v1alpha1 is the stored version and the Datadog Operator uses the 0.8.X image.
```
helm upgrade \
datadog-operator datadog/datadog-operator \
--set image.tag=0.8.4 \
--set datadogCRDs.migration.datadogAgents.version=v1alpha1 \
--set datadogCRDs.migration.datadogAgents.useCertManager=false \
--set datadogCRDs.migration.datadogAgents.conversionWebhook.enabled=false
```
- Redeploying the previous DatadogAgent v1alpha1 object.
Note: The Daemonset of the Datadog Agents will be rolled out in the process.

View File

@ -22,3 +22,181 @@ To watch all namespaces, the following configuration needs to be used:
watchNamespaces:
- ""
```
## Migrating to version 1.0 of the Datadog Operator
### Disclaimer
As part of the General Availability release of the Datadog Operator, we are offering our early adopters a migration path to the GA version of the custom resource, `v2alpha1/DatadogAgent`.
The Datadog Operator v1.X reconciles the version `v2alpha1` of the DatadogAgent custom resource, while v0.X reconciles `v1alpha1`.
In the following documentation, you will find mentions of the image with an `rc` (release candidate) tag. We will update it to the official `1.0.0` tag upon release.
Consider the following steps to have the same maturity (beta) level as the project itself.
### Requirements
If you are using v1alpha1 with a v0.X version of the Datadog Operator and would like to upgrade, you will need to use the Conversion Webhook feature.
Start by ensuring that you have the minimum required version of the chart and its dependencies:
```
NAME CHART VERSION APP VERSION DESCRIPTION
datadog/datadog-crds 0.6.1 1 Datadog Kubernetes CRDs chart
```
and for the Datadog Operator chart:
```
NAME CHART VERSION APP VERSION DESCRIPTION
datadog/datadog-operator 0.10.0 1.0.0 Datadog Operator
```
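If you manage these charts through the Helm repository, one way to produce the listings above is `helm search repo` (this assumes the Datadog repository is registered locally under the name `datadog`):
```
helm repo update
helm search repo datadog/datadog-crds --versions
helm search repo datadog/datadog-operator --versions
```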
Then you will need to install cert-manager if you don't have it already. Add its chart repository:
```
helm repo add jetstack https://charts.jetstack.io
```
and then install it:
```
helm install \
cert-manager jetstack/cert-manager \
--version v1.11.0 \
--set installCRDs=true
```
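Before continuing, it can be worth confirming that cert-manager is up and that its CRDs are registered (the namespace and CRD names below are the defaults shipped by cert-manager):
```
kubectl get pods -n cert-manager
kubectl get crd certificates.cert-manager.io issuers.cert-manager.io
```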
### Migration
You can upgrade with the following:
```
helm upgrade \
datadog-operator datadog/datadog-operator \
--set image.tag=1.0.0-rc.12 \
--set datadogCRDs.migration.datadogAgents.version=v2alpha1 \
--set datadogCRDs.migration.datadogAgents.useCertManager=true \
--set datadogCRDs.migration.datadogAgents.conversionWebhook.enabled=true
```
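After the upgrade, you can check which versions of the CRD are served and which one is stored (the stored version should now be `v2alpha1`); this is a plain `kubectl` jsonpath query, not a chart-provided command:
```
kubectl get crd datadogagents.datadoghq.com \
  -o jsonpath='{range .spec.versions[*]}{.name}{" served="}{.served}{" storage="}{.storage}{"\n"}{end}'
```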
### Implementation details
This will create a self-signed `Certificate` (using an `Issuer`) that cert-manager uses to mutate the DatadogAgent CRD, injecting the `caBundle` that the API server needs to contact the conversion webhook.
The Datadog Operator will run the new reconciler for the `v2alpha1` object and will also start a conversion webhook server, exposed on port 9443. This is the server the API server uses to convert v1alpha1 DatadogAgent objects into v2alpha1.
### Lifecycle
The conversion webhook is not meant to be an always-running process; we recommend using it as a transition to migrate your objects.
Once converted, you can store the new version of your DatadogAgent, deactivate the conversion, and simply deploy v2alpha1 objects, as sketched below.
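For example, once everything is stored as v2alpha1, the conversion machinery can be switched off with the same chart flags used during the migration (a sketch; adjust the release name to yours):
```
helm upgrade \
  datadog-operator datadog/datadog-operator \
  --set datadogCRDs.migration.datadogAgents.version=v2alpha1 \
  --set datadogCRDs.migration.datadogAgents.useCertManager=false \
  --set datadogCRDs.migration.datadogAgents.conversionWebhook.enabled=false
```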
### Roadmap
Upon releasing the v2 version of the DatadogAgent object, we will remove v1alpha1 from the CRD as part of a major update of the charts (datadog-crds and datadog-operator).
### Troubleshooting
* I don't see the v2alpha1 version of the DatadogAgent resource
Both v1alpha1 and v2alpha1 are `served`, so you might need to specify which version you want to see:
```
kubectl get datadogagents.v2alpha1.datadoghq.com datadog-agent
```
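To keep a copy of the converted object (for instance before storing it permanently as v2alpha1), you can export it to a file; the file name here is arbitrary:
```
kubectl get datadogagents.v2alpha1.datadoghq.com datadog-agent -o yaml > datadog-agent-v2alpha1.yaml
```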
* The conversion is not working
The logs of the Datadog Operator pod should show that the conversion webhook is enabled, that the server is running, and that the certificates are being watched.
```
kubectl logs datadog-operator-XXX-YYY
[...]
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.webhook","msg":"Registering webhook","path":"/convert"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.builder","msg":"Conversion webhook enabled","GVK":"datadoghq.com/v2alpha1, Kind=DatadogAgent"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"setup","msg":"starting manager"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.webhook.webhooks","msg":"Starting webhook server"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.certwatcher","msg":"Updated current TLS certificate"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.webhook","msg":"Serving webhook server","host":"","port":9443}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","msg":"Starting server","path":"/metrics","kind":"metrics","addr":"0.0.0.0:8383"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","msg":"Starting server","kind":"health probe","addr":"0.0.0.0:8081"}
{"level":"INFO","ts":"2023-02-16T16:47:07Z","logger":"controller-runtime.certwatcher","msg":"Starting certificate watcher"}
[...]
```
* Check that the Service registered for the conversion has a registered Endpoint
```
kubectl describe service datadog-operator-webhook-service
[...]
Name: datadog-operator-webhook-service
Namespace: default
[...]
Selector: app.kubernetes.io/instance=datadog-operator,app.kubernetes.io/name=datadog-operator
[...]
Port: <unset> 443/TCP
TargetPort: 9443/TCP
Endpoints: 10.88.3.28:9443
```
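If the `Endpoints` line is empty, the selector does not match any Datadog Operator pod; listing the pods by the same labels used in the Service selector can help narrow this down:
```
kubectl get pods -l app.kubernetes.io/name=datadog-operator,app.kubernetes.io/instance=datadog-operator -o wide
```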
* Verify the conversion webhook configuration registered in the CRD
```
kubectl describe crd datadogagents.datadoghq.com
[...]
Conversion:
Strategy: Webhook
Webhook:
Client Config:
Ca Bundle: LS0t[...]UtLS0tLQo=
Service:
Name: datadog-operator-webhook-service
Namespace: default
Path: /convert
Port: 443
Conversion Review Versions:
v1
```
* The CRD does not have the `caBundle`
Make sure that the CRD has the correct annotation, `cert-manager.io/inject-ca-from: default/datadog-operator-serving-cert`, and check the logs of the `cert-manager-cainjector` pod.
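One quick check is to read the injected field directly; an empty result means the CA has not been injected yet:
```
kubectl get crd datadogagents.datadoghq.com -o jsonpath='{.spec.conversion.webhook.clientConfig.caBundle}'
```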
If nothing stands out, setting the `cert-manager-cainjector` log level to 5 (debug) might help:
```
kubectl edit deploy cert-manager-cainjector -n cert-manager
[...]
spec:
containers:
- args:
- --v=5
[...]
```
You should see logs such as:
```
[...]
I0217 08:11:15.582479 1 controller.go:178] cert-manager/certificate/customresourcedefinition/generic-inject-reconciler "msg"="updated object" "resource_kind"="CustomResourceDefinition" "resource_name"="datadogagents.datadoghq.com" "resource_namespace"="" "resource_version"="v1"
I0217 08:25:24.989209 1 sources.go:98] cert-manager/certificate/customresourcedefinition/generic-inject-reconciler "msg"="Extracting CA from Certificate resource" "certificate"="default/datadog-operator-serving-cert" "resource_kind"="CustomResourceDefinition" "resource_name"="datadogagents.datadoghq.com" "resource_namespace"="" "resource_version"="v1"
[...]
```
### Rollback
If you migrated to the new version of the Datadog Operator using v2alpha1 but want to roll back to the former version, we recommend:
- Scaling the Datadog Operator deployment to 0 replicas.
```
kubectl scale deploy datadog-operator --replicas=0
```
- Upgrading the chart so that v1alpha1 is the stored version and the Datadog Operator uses the 0.8.X image.
```
helm upgrade \
datadog-operator datadog/datadog-operator \
--set image.tag=0.8.4 \
--set datadogCRDs.migration.datadogAgents.version=v1alpha1 \
--set datadogCRDs.migration.datadogAgents.useCertManager=false \
--set datadogCRDs.migration.datadogAgents.conversionWebhook.enabled=false
```
- Redeploying the previous DatadogAgent v1alpha1 object.
Note: The Datadog Agent DaemonSet will be rolled out in the process.

View File

@ -1,5 +1,14 @@
# Changelog
## 0.6.1
* Add missing `nodeLabelsAsTags` and `namespaceLabelsAsTags` to the v2alpha1 spec.
## 0.6.0
* Support Certificate Manager.
* Document conversion webhook configuration.
## 0.5.9
* Updating DatadogMonitors CRD and DatadogAgents CRDs.

View File

@ -15,4 +15,4 @@ sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-operator
- https://docs.datadoghq.com/agent/cluster_agent/external_metrics
version: 0.5.9
version: 0.6.1

View File

@ -1,6 +1,6 @@
# Datadog CRDs
![Version: 0.5.9](https://img.shields.io/badge/Version-0.5.9-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square)
![Version: 0.6.1](https://img.shields.io/badge/Version-0.6.1-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square)
This chart was designed to allow other "datadog" charts to share `CustomResourceDefinitions` such as the `DatadogMetric`.
@ -26,6 +26,10 @@ But the recommended Kubernetes versions are `1.16+`.
| crds.datadogMetrics | bool | `false` | Set to true to deploy the DatadogMetrics CRD |
| crds.datadogMonitors | bool | `false` | Set to true to deploy the DatadogMonitors CRD |
| fullnameOverride | string | `""` | Override the fully qualified app name |
| migration.datadogAgents.conversionWebhook.enabled | bool | `false` | |
| migration.datadogAgents.conversionWebhook.name | string | `"datadog-operator-webhook-service"` | |
| migration.datadogAgents.conversionWebhook.namespace | string | `"default"` | |
| migration.datadogAgents.useCertManager | bool | `false` | |
| migration.datadogAgents.version | string | `"v1alpha1"` | |
| nameOverride | string | `""` | Override name of app |

View File

@ -4,6 +4,9 @@ kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.6.1
{{- if .Values.migration.datadogAgents.useCertManager }}
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ .Release.Name }}-serving-cert
{{- end }}
creationTimestamp: null
name: datadogagents.datadoghq.com
labels:
@ -12,6 +15,18 @@ metadata:
app.kubernetes.io/name: '{{ include "datadog-crds.name" . }}'
app.kubernetes.io/instance: '{{ .Release.Name }}'
spec:
{{- if .Values.migration.datadogAgents.conversionWebhook.enabled }}
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: {{ .Values.migration.datadogAgents.conversionWebhook.namespace }}
name: {{ .Values.migration.datadogAgents.conversionWebhook.name }}
path: /convert
conversionReviewVersions:
- v1
{{- end }}
group: datadoghq.com
names:
kind: DatadogAgent
@ -6425,6 +6440,14 @@ spec:
flavor:
type: string
type: object
namespaceLabelsAsTags:
additionalProperties:
type: string
type: object
nodeLabelsAsTags:
additionalProperties:
type: string
type: object
podAnnotationsAsTags:
additionalProperties:
type: string

View File

@ -12,7 +12,11 @@ crds:
migration:
datadogAgents:
# enabled: false # Will be used when we add the option to add the webhookConversion field in the CRD.
conversionWebhook:
enabled: false
name: datadog-operator-webhook-service
namespace: default
useCertManager: false
version: "v1alpha1"
# nameOverride -- Override name of app

View File

@ -0,0 +1,26 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More document can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
{{- if and .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled .Values.datadogCRDs.migration.datadogAgents.useCertManager }}
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "datadog-operator.name" . }}-selfsigned-issuer
namespace: {{ .Release.Namespace }}
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "datadog-operator.name" . }}-serving-cert
namespace: {{ .Release.Namespace }}
spec:
dnsNames:
- {{ .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.name }}.{{ .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.namespace }}.svc
- {{ .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.name }}.{{ .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.namespace }}.svc.cluster.local
issuerRef:
kind: Issuer
name: {{ include "datadog-operator.name" . }}-selfsigned-issuer
secretName: {{ include "datadog-operator.name" . }}-webhook-server-cert
{{- end }}

View File

@ -93,7 +93,7 @@ spec:
- "-logEncoder=json"
- "-metrics-addr=:{{ .Values.metricsPort }}"
- "-loglevel={{ .Values.logLevel }}"
{{- if and (not .Values.webhook.conversion.enabled) (semverCompare ">=1.0.0-0" .Values.image.tag ) }}
{{- if and (not .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled) (semverCompare ">=1.0.0-0" .Values.image.tag ) }}
- "-webhookEnabled=false"
{{- end }}
{{- if .Values.secretBackend.command }}
@ -107,6 +107,11 @@ spec:
- name: metrics
containerPort: {{ .Values.metricsPort }}
protocol: TCP
{{- if .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled }}
- name: webhook
containerPort: 9443
protocol: TCP
{{- end }}
livenessProbe:
httpGet:
path: /healthz/
@ -114,6 +119,12 @@ spec:
periodSeconds: 10
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled }}
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
{{- end }}
{{- if .Values.containerSecurityContext }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
@ -130,3 +141,10 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled }}
volumes:
- name: cert
secret:
defaultMode: 420
secretName: {{ include "datadog-operator.name" . }}-webhook-server-cert
{{- end }}

View File

@ -0,0 +1,14 @@
{{- if .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.name }}
namespace: {{ .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.namespace }}
spec:
ports:
- port: 443
targetPort: 9443
selector:
app.kubernetes.io/name: {{ include "datadog-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -98,7 +98,7 @@ affinity: {}
# installCRDs -- Set to true to deploy the Datadog's CRDs
installCRDs: true
datadog-crds:
datadogCRDs:
crds:
# datadog-crds.crds.datadogAgents -- Set to true to deploy the DatadogAgents CRD
datadogAgents: true
@ -108,14 +108,13 @@ datadog-crds:
datadogMonitors: true
migration:
datadogAgents:
conversionWebhook:
enabled: false
name: datadog-operator-webhook-service
namespace: default
useCertManager: false
version: "v1alpha1"
# webhook -- configure webhook servers
webhook:
## Use the conversion webhook server when multiple versions of objects managed by the Datadog Operator are served.
conversion:
enabled: false
# podAnnotations -- Allows setting additional annotations for Datadog Operator PODs
podAnnotations: {}
# podLabels -- Allows setting additional labels for for Datadog Operator PODs

View File

@ -1,5 +1,14 @@
# Datadog changelog
## 3.23.0
* Injects additional environment variables in the Cluster Agent
* Add `clusterAgent.rbac.flareAdditionalPermissions` parameter to enable user Helm values retrieval in DCA flare (`true` by default)
## 3.22.0
* Auto-configure `clusterAgent.admissionController.configMode` based on `datadog.apm.socketEnabled|portEnabled`.
## 3.21.0
* Add `datadog.remoteConfiguration.enabled` parameter to enable remote configuration.

View File

@ -19,4 +19,4 @@ name: datadog
sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-agent
version: 3.21.0
version: 3.23.0

View File

@ -1,6 +1,6 @@
# Datadog
![Version: 3.21.0](https://img.shields.io/badge/Version-3.21.0-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
![Version: 3.23.0](https://img.shields.io/badge/Version-3.23.0-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
[Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/).
@ -527,6 +527,7 @@ helm install <RELEASE_NAME> \
| clusterAgent.priorityClassName | string | `nil` | Name of the priorityClass to apply to the Cluster Agent |
| clusterAgent.rbac.automountServiceAccountToken | bool | `true` | If true, automatically mount the ServiceAccount's API credentials if clusterAgent.rbac.create is true |
| clusterAgent.rbac.create | bool | `true` | If true, create & use RBAC resources |
| clusterAgent.rbac.flareAdditionalPermissions | bool | `true` | If true, add Secrets and Configmaps get/list permissions to retrieve user Datadog Helm values from Cluster Agent namespace |
| clusterAgent.rbac.serviceAccountAnnotations | object | `{}` | Annotations to add to the ServiceAccount if clusterAgent.rbac.create is true |
| clusterAgent.rbac.serviceAccountName | string | `"default"` | Specify a preexisting ServiceAccount to use if clusterAgent.rbac.create is false |
| clusterAgent.readinessProbe | object | Every 15s / 6 KO / 1 OK | Override default Cluster Agent readiness probe settings |

View File

@ -0,0 +1,9 @@
datadog:
apiKey: "00000000000000000000000000000000"
appKey: "0000000000000000000000000000000000000000"
apm:
enabled: false
clusterAgent:
enabled: true
admissionController:
enabled: true

View File

@ -0,0 +1,12 @@
datadog:
apiKey: "00000000000000000000000000000000"
appKey: "0000000000000000000000000000000000000000"
apm:
enabled: true
clusterAgent:
enabled: true
admissionController:
enabled: true
agents:
localService:
forceLocalServiceEnabled: false

View File

@ -0,0 +1,9 @@
datadog:
apiKey: "00000000000000000000000000000000"
appKey: "0000000000000000000000000000000000000000"
apm:
portEnabled: true
clusterAgent:
enabled: true
admissionController:
enabled: true

View File

@ -0,0 +1,10 @@
datadog:
apiKey: "00000000000000000000000000000000"
appKey: "0000000000000000000000000000000000000000"
apm:
socketEnabled: true
portEnabled: true
clusterAgent:
enabled: true
admissionController:
enabled: true

View File

@ -0,0 +1,9 @@
datadog:
apiKey: "00000000000000000000000000000000"
appKey: "0000000000000000000000000000000000000000"
apm:
socketEnabled: true
clusterAgent:
enabled: true
admissionController:
enabled: true

View File

@ -178,14 +178,20 @@ spec:
value: {{ .Values.clusterAgent.admissionController.mutateUnlabelled | quote }}
- name: DD_ADMISSION_CONTROLLER_SERVICE_NAME
value: {{ template "datadog.fullname" . }}-cluster-agent-admission-controller
{{- if .Values.clusterAgent.admissionController.configMode }}
- name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE
{{- if .Values.clusterAgent.admissionController.configMode }}
value: {{ .Values.clusterAgent.admissionController.configMode }}
{{- else if eq (include "trace-agent-use-uds" .) "true" }}
value: socket
{{- else if or (eq (include "trace-agent-use-tcp-port" .) "true") ( .Values.providers.gke.autopilot )}}
value: hostip
{{- else if or (not .Values.datadog.apm.enabled ) (and (eq (include "trace-agent-use-tcp-port" .) "true") (eq (include "trace-agent-use-uds" .) "true")) }}
value: socket
{{- else }}
value: {{ .Values.clusterAgent.admissionController.configMode | quote }}
{{- if eq .Values.clusterAgent.admissionController.configMode "service" }}
{{- end }}
- name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME
value: {{ template "localService.name" . }}
{{- end }}
{{- end }}
{{- if .Values.providers.aks.enabled }}
- name: DD_ADMISSION_CONTROLLER_ADD_AKS_SELECTORS
value: "true"
@ -239,6 +245,12 @@ spec:
value: {{ .Values.datadog.clusterTagger.collectKubernetesTags | quote }}
- name: DD_KUBE_RESOURCES_NAMESPACE
value: {{ .Release.Namespace }}
- name: CHART_RELEASE_NAME
value: {{ .Release.Name | quote }}
- name: AGENT_DAEMONSET
value: {{ template "datadog.fullname" . }}
- name: CLUSTER_AGENT_DEPLOYMENT
value: {{ template "datadog.fullname" . }}-cluster-agent
- name: DD_ORCHESTRATOR_EXPLORER_ENABLED
value: {{ (include "should-enable-k8s-resource-monitoring" .) | quote }}
{{- if eq (include "should-enable-k8s-resource-monitoring" .) "true" }}

View File

@ -0,0 +1,35 @@
{{- if and .Values.clusterAgent.rbac.create (eq (include "cluster-agent-enabled" .) "true") .Values.clusterAgent.rbac.flareAdditionalPermissions}}
apiVersion: {{ template "rbac.apiVersion" . }}
kind: Role
metadata:
labels:
{{ include "datadog.labels" . | indent 4 }}
name: {{ template "datadog.fullname" . }}-dca-flare
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- get
- list
---
apiVersion: {{ template "rbac.apiVersion" . }}
kind: RoleBinding
metadata:
labels:
{{ include "datadog.labels" . | indent 4 }}
name: {{ template "datadog.fullname" . }}-dca-flare
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "datadog.fullname" . }}-dca-flare
subjects:
- kind: ServiceAccount
name: {{ template "datadog.fullname" . }}-cluster-agent
namespace: {{ .Release.Namespace }}
---
{{- end }}

View File

@ -858,6 +858,9 @@ clusterAgent:
# clusterAgent.rbac.create -- If true, create & use RBAC resources
create: true
# clusterAgent.rbac.flareAdditionalPermissions -- If true, add Secrets and Configmaps get/list permissions to retrieve user Datadog Helm values from Cluster Agent namespace
flareAdditionalPermissions: true
# clusterAgent.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if clusterAgent.rbac.create is false
serviceAccountName: default
@ -937,7 +940,10 @@ clusterAgent:
# clusterAgent.admissionController.configMode -- The kind of configuration to be injected, it can be "hostip", "service", or "socket".
## If clusterAgent.admissionController.configMode is not set, the Admission Controller defaults to hostip.
## If clusterAgent.admissionController.configMode is not set:
## * and datadog.apm.socketEnabled is true, the Admission Controller uses socket.
## * and datadog.apm.portEnabled is true, the Admission Controller uses hostip.
## * Otherwise, the Admission Controller defaults to hostip.
## Note: "service" mode relies on the internal traffic service to target the agent running on the local node (requires Kubernetes v1.22+).
## ref: https://docs.datadoghq.com/agent/cluster_agent/admission_controller/#configure-apm-and-dogstatsd-communication-mode
configMode: # "hostip", "socket" or "service"
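  ## For example, to pin the mode explicitly instead of relying on the auto-configuration
  ## above, a user values file could set (hypothetical override, not a chart default):
  ##   clusterAgent:
  ##     admissionController:
  ##       configMode: "socket"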

View File

@ -1,10 +1,10 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Dell CSI PowerStore
catalog.cattle.io/kube-version: '>= 1.21.0 < 1.26.0'
catalog.cattle.io/kube-version: '>= 1.22.0 < 1.27.0'
catalog.cattle.io/release-name: csi-powerstore
apiVersion: v2
appVersion: 2.5.1
appVersion: 2.6.0
description: 'PowerStore CSI (Container Storage Interface) driver Kubernetes integration.
This chart includes everything required to provision via CSI as well as a PowerStore
StorageClass. '
@ -13,10 +13,10 @@ icon: https://partner-charts.rancher.io/assets/logos/dell.png
keywords:
- csi
- storage
kubeVersion: '>= 1.21.0 < 1.26.0'
kubeVersion: '>= 1.22.0 < 1.27.0'
maintainers:
- name: DellEMC
name: csi-powerstore
sources:
- https://github.com/dell/csi-powerstore
version: 2.5.1
version: 2.6.0

View File

@ -3,48 +3,48 @@ Return the appropriate sidecar images based on k8s version
*/}}
{{- define "csi-powerstore.attacherImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-attacher:v4.0.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-attacher:v4.2.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-powerstore.provisionerImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-provisioner:v3.3.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-provisioner:v3.4.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-powerstore.snapshotterImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-snapshotter:v6.1.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-snapshotter:v6.2.1" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-powerstore.resizerImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-resizer:v1.6.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-resizer:v1.7.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-powerstore.registrarImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.6.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.6.3" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-powerstore.healthmonitorImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "gcr.io/k8s-staging-sig-storage/csi-external-health-monitor-controller:v0.7.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "gcr.io/k8s-staging-sig-storage/csi-external-health-monitor-controller:v0.8.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -35,7 +35,13 @@ rules:
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
{{- if hasKey .Values "podmon" }}
{{- if eq .Values.podmon.enabled true }}
verbs: ["get", "list", "watch", "patch"]
{{- else }}
verbs: ["get", "list", "watch"]
{{- end }}
{{- end }}
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
@ -47,7 +53,13 @@ rules:
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
{{- if hasKey .Values "podmon" }}
{{- if eq .Values.podmon.enabled true }}
verbs: ["get", "list", "watch", "update", "patch", "delete"]
{{- else }}
verbs: ["get", "list", "watch", "update", "patch"]
{{- end }}
{{- end }}
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
@ -81,7 +93,13 @@ rules:
verbs: ["patch"]
- apiGroups: [""]
resources: ["pods"]
{{- if hasKey .Values "podmon" }}
{{- if eq .Values.podmon.enabled true }}
verbs: ["get", "list", "watch", "update", "delete"]
{{- else }}
verbs: ["get", "list", "watch"]
{{- end }}
{{- end }}
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
@ -174,6 +192,33 @@ spec:
- {{ .Release.Name }}-controller
topologyKey: "kubernetes.io/hostname"
containers:
{{- if hasKey .Values "podmon" }}
{{- if eq .Values.podmon.enabled true }}
- name: podmon
image: {{ required "Must provide the podmon container image." .Values.podmon.image }}
imagePullPolicy: {{ .Values.imagePullPolicy }}
args:
{{- toYaml .Values.podmon.controller.args | nindent 12 }}
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: socket-dir
mountPath: /var/run/csi
- name: powerstore-config-params
mountPath: /powerstore-config-params
{{- end }}
{{- end }}
{{- if hasKey .Values "dev" }}
{{ if .Values.dev.enableTracing }}{{- include "pstore.tracing" . | nindent 8 }}{{ end }}
{{- end }}
@ -360,6 +405,20 @@ spec:
value: /powerstore-config/config
- name: X_CSI_POWERSTORE_CONFIG_PARAMS_PATH
value: /powerstore-config-params/driver-config-params.yaml
{{- if hasKey .Values "podmon" }}
- name: X_CSI_PODMON_ENABLED
value: "{{ .Values.podmon.enabled }}"
{{- if eq .Values.podmon.enabled true }}
{{- range $key, $value := .Values.podmon.controller.args }}
{{- if contains "--arrayConnectivityPollRate" $value }}
- name: X_CSI_PODMON_ARRAY_CONNECTIVITY_POLL_RATE
value: "{{ (split "=" $value)._1 }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}
- name: X_CSI_PODMON_API_PORT
value: "{{ .Values.podmonAPIPort }}"
{{- if hasKey .Values.controller "replication" }}
{{- if eq .Values.controller.replication.enabled true}}
- name: X_CSI_REPLICATION_CONTEXT_PREFIX

View File

@ -22,4 +22,10 @@ metadata:
data:
driver-config-params.yaml: |
CSI_LOG_LEVEL: "{{ .Values.logLevel }}"
CSI_LOG_FORMAT: "{{ .Values.logFormat }}"
CSI_LOG_FORMAT: "{{ .Values.logFormat }}"
{{ if .Values.podmon.enabled }}
PODMON_CONTROLLER_LOG_LEVEL: "{{ .Values.logLevel }}"
PODMON_CONTROLLER_LOG_FORMAT: "{{ .Values.logFormat }}"
PODMON_NODE_LOG_LEVEL: "{{ .Values.logLevel }}"
PODMON_NODE_LOG_FORMAT: "{{ .Values.logFormat }}"
{{ end }}

View File

@ -52,7 +52,16 @@ rules:
resourceNames: ["privileged"]
resources: ["securitycontextconstraints"]
verbs: ["use"]
{{- if hasKey .Values "podmon" }}
{{- if eq .Values.podmon.enabled true }}
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "update", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{ end }}
{{ end }}
---
@ -84,6 +93,9 @@ spec:
metadata:
labels:
app: {{ .Release.Name }}-node
{{- if .Values.podmon.enabled }}
driver.dellemc.com: dell-storage
{{- end }}
spec:
{{ if .Values.node.nodeSelector }}
nodeSelector:
@ -98,6 +110,58 @@ spec:
hostNetwork: true
hostIPC: true
containers:
{{- if hasKey .Values "podmon" }}
{{- if eq .Values.podmon.enabled true }}
- name: podmon
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: {{ required "Must provide the podmon container image." .Values.podmon.image }}
imagePullPolicy: {{ .Values.imagePullPolicy }}
args:
{{- toYaml .Values.podmon.node.args | nindent 12 }}
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: X_CSI_PRIVATE_MOUNT_DIR
value: {{ .Values.kubeletConfigDir }}
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: kubelet-pods
mountPath: {{ .Values.kubeletConfigDir }}/pods
mountPropagation: "Bidirectional"
- name: driver-path
mountPath: {{ .Values.kubeletConfigDir }}/plugins/{{ .Values.driverName }}
mountPropagation: "Bidirectional"
- name: csi-path
mountPath: {{ .Values.kubeletConfigDir }}/plugins/kubernetes.io/csi
mountPropagation: "Bidirectional"
- name: dev
mountPath: /dev
- name: usr-bin
mountPath: /usr-bin
- name: var-run
mountPath: /var/run
- name: powerstore-config-params
mountPath: /powerstore-config-params
{{- end }}
{{- end }}
{{- if hasKey .Values "dev" }}
{{ if .Values.dev.enableTracing }}{{- include "pstore.tracing" . | nindent 8 }}{{ end }}
{{- end}}
@ -158,6 +222,20 @@ spec:
value: "{{ .Values.controller.healthMonitor.enabled }}"
{{- end }}
{{- end }}
{{- if hasKey .Values "podmon" }}
- name: X_CSI_PODMON_ENABLED
value: "{{ .Values.podmon.enabled }}"
{{- if eq .Values.podmon.enabled true }}
{{- range $key, $value := .Values.podmon.node.args }}
{{- if contains "--arrayConnectivityPollRate" $value }}
- name: X_CSI_PODMON_ARRAY_CONNECTIVITY_POLL_RATE
value: "{{ (split "=" $value)._1 }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}
- name: X_CSI_PODMON_API_PORT
value: "{{ .Values.podmonAPIPort }}"
volumeMounts:
- name: driver-path
mountPath: {{ .Values.kubeletConfigDir }}/plugins/{{ .Values.driverName }}
@ -255,3 +333,19 @@ spec:
- name: powerstore-config
secret:
secretName: {{ .Release.Name }}-config
{{- if hasKey .Values "podmon" }}
{{- if eq .Values.podmon.enabled true }}
- name: usr-bin
hostPath:
path: /usr/bin
type: Directory
- name: kubelet-pods
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: var-run
hostPath:
path: /var/run
type: Directory
{{ end }}
{{ end }}

View File

@ -23,7 +23,7 @@
driverName: "csi-powerstore.dellemc.com"
# Driver version required to pull the latest driver image
version: "v2.5.1"
version: "v2.6.0"
# Specify kubelet config dir path.
# Ensure that the config.yaml file is present at this path.
@ -68,6 +68,12 @@ imagePullPolicy: IfNotPresent
# Default value: "0777"
nfsAcls: "0777"
# podmonAPIPort: Defines the port to be used within the kubernetes cluster
# Allowed values:
# Any valid and free port.
# Default value: 8083
podmonAPIPort: 8083
# controller: configure controller specific parameters
controller:
# controllerCount: defines the number of csi-powerstore controller pods to deploy to
@ -92,8 +98,8 @@ controller:
enabled: false
# image: Image to use for volume-group-snapshotter. This shouldn't be changed
# Allowed values: string
# Default value: dellemc/csi-volumegroup-snapshotter:v1.1.0
image: dellemc/csi-volumegroup-snapshotter:v1.1.0
# Default value: dellemc/csi-volumegroup-snapshotter:v1.2.0
image: dellemc/csi-volumegroup-snapshotter:v1.2.0
# snapshot: allows to enable/disable snapshot feature
# snapshot CRDs needs to be installed before enabling this feature
@ -145,7 +151,7 @@ controller:
# image: Image to use for dell-csi-replicator. This shouldn't be changed
# Allowed values: string
# Default value: None
image: dellemc/dell-csi-replicator:v1.3.0
image: dellemc/dell-csi-replicator:v1.4.0
# replicationContextPrefix: prefix to use for naming of resources created by replication feature
# Allowed values: string
@ -158,7 +164,7 @@ controller:
replicationPrefix: "replication.storage.dell.com"
# Image for csi-metadata-retriever
metadataretriever: dellemc/csi-metadata-retriever:v1.2.0
metadataretriever: dellemc/csi-metadata-retriever:v1.3.0
# nodeSelector: Define node selection constraints for controller pods.
# For the pod to be eligible to run on a node, the node must have each
@ -232,6 +238,33 @@ node:
# operator: "Exists"
# effect: "NoSchedule"
# Uncomment if CSM for Resiliency and CSI Driver pods monitor are enabled
#tolerations:
# - key: "offline.vxflexos.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "vxflexos.podmon.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "offline.unity.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "unity.podmon.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "offline.isilon.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "isilon.podmon.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "offline.powerstore.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "powerstore.podmon.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
## PLATFORM ATTRIBUTES
######################
@ -275,6 +308,34 @@ storageCapacity:
# Default value: 5m
pollInterval: 5m
# Enable this feature only after contact support for additional information
podmon:
enabled: false
image: dellemc/podmon:v1.5.0
#controller:
# args:
# - "--csisock=unix:/var/run/csi/csi.sock"
# - "--labelvalue=csi-powerstore"
# - "--arrayConnectivityPollRate=60"
# - "--driverPath=csi-powerstore.dellemc.com"
# - "--mode=controller"
# - "--skipArrayConnectionValidation=false"
# - "--driver-config-params=/powerstore-config-params/driver-config-params.yaml"
# - "--driverPodLabelValue=dell-storage"
# - "--ignoreVolumelessPods=false"
#node:
# args:
# - "--csisock=unix:/var/lib/kubelet/plugins/csi-powerstore.dellemc.com/csi_sock"
# - "--labelvalue=csi-powerstore"
# - "--arrayConnectivityPollRate=60"
# - "--driverPath=csi-powerstore.dellemc.com"
# - "--mode=node"
# - "--leaderelection=false"
# - "--driver-config-params=/powerstore-config-params/driver-config-params.yaml"
# - "--driverPodLabelValue=dell-storage"
# - "--ignoreVolumelessPods=false"
images:
# "driver" defines the container image, used for the driver container.
driverRepository: dellemc

View File

@ -1,11 +1,11 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Dell CSI Unity
catalog.cattle.io/kube-version: '>= 1.21.0 < 1.26.0'
catalog.cattle.io/kube-version: '>= 1.24.0 < 1.27.0'
catalog.cattle.io/namespace: unity
catalog.cattle.io/release-name: csi-unity
apiVersion: v1
appVersion: 2.5.0
appVersion: 2.6.0
description: 'Unity XT CSI (Container Storage Interface) driver Kubernetes integration.
This chart includes everything required to provision via CSI as well as a Unity
XT StorageClass. '
@ -13,10 +13,10 @@ icon: https://partner-charts.rancher.io/assets/logos/dell.png
keywords:
- csi
- storage
kubeVersion: '>= 1.21.0 < 1.26.0'
kubeVersion: '>= 1.24.0 < 1.27.0'
maintainers:
- name: DellEMC
name: csi-unity
sources:
- https://github.com/dell/csi-unity
version: 2.5.0
version: 2.6.0

View File

@ -3,48 +3,48 @@ Return the appropriate sidecar images based on k8s version
*/}}
{{- define "csi-unity.attacherImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-attacher:v4.0.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-attacher:v4.2.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-unity.provisionerImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-provisioner:v3.3.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-provisioner:v3.4.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-unity.snapshotterImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-snapshotter:v6.1.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-snapshotter:v6.2.1" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-unity.resizerImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-resizer:v1.6.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-resizer:v1.7.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-unity.registrarImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.6.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.6.3" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-unity.healthmonitorImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "gcr.io/k8s-staging-sig-storage/csi-external-health-monitor-controller:v0.7.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "22") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "gcr.io/k8s-staging-sig-storage/csi-external-health-monitor-controller:v0.8.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -3,8 +3,8 @@
# version: version of this values file
# Note: Do not change this value
# Examples : "v2.5.0" , "nightly"
version: "v2.5.0"
# Examples : "v2.6.0" , "nightly"
version: "v2.6.0"
# LogLevel is used to set the logging level of the driver.
# Allowed values: "error", "warn"/"warning", "info", "debug"
@ -193,7 +193,7 @@ podmon:
# allowed values - string
# default value : None
# Example : "podman:latest", "pod:latest"
image: dellemc/podmon:v1.4.0
image: dellemc/podmon:v1.5.0
# controller:
# args:
# - "--csisock=unix:/var/run/csi/csi.sock"

View File

@ -1,11 +1,11 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Dell CSI PowerFlex
catalog.cattle.io/kube-version: '>= 1.21.0 < 1.26.0'
catalog.cattle.io/kube-version: '>= 1.21.0 < 1.27.0'
catalog.cattle.io/namespace: vxflexos
catalog.cattle.io/release-name: vxflexos
apiVersion: v2
appVersion: 2.5.0
appVersion: 2.6.0
description: 'VxFlex OS CSI (Container Storage Interface) driver Kubernetes integration.
This chart includes everything required to provision via CSI as well as a VxFlex
OS StorageClass. '
@ -13,10 +13,10 @@ icon: https://partner-charts.rancher.io/assets/logos/dell.png
keywords:
- csi
- storage
kubeVersion: '>= 1.21.0 < 1.26.0'
kubeVersion: '>= 1.21.0 < 1.27.0'
maintainers:
- name: DellEMC
name: csi-vxflexos
sources:
- https://github.com/dell/csi-vxflexos
version: 2.5.0
version: 2.6.0

View File

@ -3,48 +3,48 @@ Return the appropriate sidecar images based on k8s version
*/}}
{{- define "csi-vxflexos.attacherImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-attacher:v4.0.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-attacher:v4.2.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-vxflexos.provisionerImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-provisioner:v3.3.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-provisioner:v3.4.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-vxflexos.snapshotterImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-snapshotter:v6.1.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-snapshotter:v6.2.1" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-vxflexos.resizerImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-resizer:v1.6.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-resizer:v1.7.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-vxflexos.registrarImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.6.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.6.3" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "csi-vxflexos.healthmonitorImage" -}}
{{- if eq .Capabilities.KubeVersion.Major "1" }}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "25") -}}
{{- print "gcr.io/k8s-staging-sig-storage/csi-external-health-monitor-controller:v0.7.0" -}}
{{- if and (ge (trimSuffix "+" .Capabilities.KubeVersion.Minor) "21") (le (trimSuffix "+" .Capabilities.KubeVersion.Minor) "26") -}}
{{- print "gcr.io/k8s-staging-sig-storage/csi-external-health-monitor-controller:v0.8.0" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -95,6 +95,22 @@ rules:
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete", "update"]
{{- if hasKey .Values.controller "replication" }}
{{- if eq .Values.controller.replication.enabled true}}
- apiGroups: ["replication.storage.dell.com"]
resources: ["dellcsireplicationgroups"]
verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
- apiGroups: ["replication.storage.dell.com"]
resources: ["dellcsireplicationgroups/status"]
verbs: ["get", "patch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "delete", "get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["create", "get", "list", "watch"]
{{- end}}
{{- end}}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@ -201,6 +217,34 @@ spec:
volumeMounts:
- name: socket-dir
mountPath: /var/run/csi
{{- if hasKey .Values.controller "replication" }}
{{- if eq .Values.controller.replication.enabled true}}
- name: dell-csi-replicator
image: {{ required "Must provide the Dell CSI Replicator image." .Values.controller.replication.image}}
imagePullPolicy: {{ .Values.imagePullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--leader-election=true"
- "--worker-threads=2"
- "--retry-interval-start=1s"
- "--retry-interval-max=300s"
- "--timeout=300s"
- "--context-prefix={{ .Values.controller.replication.replicationContextPrefix}}"
- "--prefix={{ .Values.controller.replication.replicationPrefix}}"
env:
- name: ADDRESS
value: /var/run/csi/csi.sock
- name: X_CSI_REPLICATION_CONFIG_DIR
value: /vxflexos-config-params
- name: X_CSI_REPLICATION_CONFIG_FILE_NAME
value: driver-config-params.yaml
volumeMounts:
- name: socket-dir
mountPath: /var/run/csi
- name: vxflexos-config-params
mountPath: /vxflexos-config-params
{{- end }}
{{- end }}
- name: provisioner
image: {{ required "Must provide the CSI provisioner container image." ( include "csi-vxflexos.provisionerImage" . ) }}
imagePullPolicy: {{ .Values.imagePullPolicy }}
@ -339,6 +383,14 @@ spec:
value: "{{ required "Enable this to have CSI ListVolumes include snapshots" .Values.enablelistvolumesnapshot }}"
- name: SSL_CERT_DIR
value: /certs
{{- if hasKey .Values.controller "replication" }}
{{- if eq .Values.controller.replication.enabled true}}
- name: X_CSI_REPLICATION_CONTEXT_PREFIX
value: {{ .Values.controller.replication.replicationContextPrefix | default "powerflex"}}
- name: X_CSI_REPLICATION_PREFIX
value: {{ .Values.controller.replication.replicationPrefix | default "replication.storage.dell.com"}}
{{- end }}
{{- end }}
{{- if hasKey .Values.controller "healthMonitor" }}
{{- if eq .Values.controller.healthMonitor.enabled true}}
- name: X_CSI_HEALTH_MONITOR_ENABLED

View File

@ -203,6 +203,18 @@ spec:
- name: X_CSI_HEALTH_MONITOR_ENABLED
value: "{{ .Values.node.healthMonitor.enabled }}"
{{- end }}
{{- if hasKey .Values.node "approveSDC" }}
- name: X_CSI_APPROVE_SDC_ENABLED
value: "{{ .Values.node.approveSDC.enabled }}"
{{- end }}
{{- if hasKey .Values.node "renameSDC" }}
- name: X_CSI_RENAME_SDC_ENABLED
value: "{{ .Values.node.renameSDC.enabled }}"
{{- if eq .Values.node.renameSDC.enabled true }}
- name: X_CSI_RENAME_SDC_PREFIX
value: "{{ .Values.node.renameSDC.prefix }}"
{{- end }}
{{- end }}
volumeMounts:
- name: driver-path
mountPath: {{ .Values.kubeletConfigDir }}/plugins/vxflexos.emc.dell.com

View File

@ -3,7 +3,7 @@
# "version" is used to verify the values file matches driver version
# Not recommend to change
version: v2.5.0
version: v2.6.0
images:
# "driver" defines the container image, used for the driver container.
@ -82,6 +82,31 @@ fsGroupPolicy: File
# "controller" allows to configure controller specific parameters
controller:
# replication: allows to configure replication
# Replication CRDs must be installed before installing driver
replication:
# enabled: Enable/Disable replication feature
# Allowed values:
# true: enable replication feature(install dell-csi-replicator sidecar)
# false: disable replication feature(do not install dell-csi-replicator sidecar)
# Default value: false
enabled: false
# image: Image to use for dell-csi-replicator. This shouldn't be changed
# Allowed values: string
# Default value: None
image: dellemc/dell-csi-replicator:v1.4.0
# replicationContextPrefix: prefix to use for naming of resources created by replication feature
# Allowed values: string
# Default value: powerflex
replicationContextPrefix: "powerflex"
# replicationPrefix: prefix to prepend to storage classes parameters
# Allowed values: string
# Default value: replication.storage.dell.com
replicationPrefix: "replication.storage.dell.com"
healthMonitor:
# enabled: Enable/Disable health monitor of CSI volumes
# Allowed values:
@ -194,6 +219,32 @@ node:
# - key: "isilon.podmon.storage.dell.com"
# operator: "Exists"
# effect: "NoSchedule"
# "renameSDC" defines the rename operation for SDC
# Default value: None
renameSDC:
# enabled: Enable/Disable rename of SDC
# Allowed values:
# true: enable renaming
# false: disable renaming
# Default value: "false"
enabled: false
# "prefix" defines a string for prefix of the SDC name.
# "prefix" + "worker_node_hostname" should not exceed 31 chars.
# Default value: none
# Examples: "rhel-sdc", "sdc-test"
prefix: "sdc-test"
# "approveSDC" defines the approve operation for SDC
# Default value: None
approveSDC:
# enabled: Enable/Disable SDC approval
#Allowed values:
# true: Driver will attempt to approve restricted SDC by GUID during setup
# false: Driver will not attempt to approve restricted SDC by GUID during setup
# Default value: false
enabled: false
# monitoring pod details
# These options control the running of the monitoring container
# This container gather diagnostic information in case of failure
@ -226,7 +277,7 @@ vgsnapshotter:
# Enable this feature only after contact support for additional information
podmon:
enabled: false
image: dellemc/podmon:v1.4.0
image: dellemc/podmon:v1.5.0
#controller:
# args:
# - "--csisock=unix:/var/run/csi/csi.sock"
@ -257,8 +308,8 @@ authorization:
enabled: false
# sidecarProxyImage: the container image used for the csm-authorization-sidecar.
# Default value: dellemc/csm-authorization-sidecar:v1.5.0
sidecarProxyImage: dellemc/csm-authorization-sidecar:v1.5.0
# Default value: dellemc/csm-authorization-sidecar:v1.6.0
sidecarProxyImage: dellemc/csm-authorization-sidecar:v1.6.0
# proxyHost: hostname of the csm-authorization server
# Default value: None

View File

@ -2,22 +2,22 @@ annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: NGINX Ingress Controller
catalog.cattle.io/featured: "3"
catalog.cattle.io/kube-version: '>= 1.21.0-0'
catalog.cattle.io/kube-version: '>= 1.22.0-0'
catalog.cattle.io/release-name: nginx-ingress
apiVersion: v2
appVersion: 3.0.2
appVersion: 3.1.0
description: NGINX Ingress Controller
home: https://github.com/nginxinc/kubernetes-ingress
icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.0.2/deployments/helm-chart/chart-icon.png
icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.1.0/deployments/helm-chart/chart-icon.png
keywords:
- ingress
- nginx
kubeVersion: '>= 1.21.0-0'
kubeVersion: '>= 1.22.0-0'
maintainers:
- email: kubernetes@nginx.com
name: nginxinc
name: nginx-ingress
sources:
- https://github.com/nginxinc/kubernetes-ingress/tree/v3.0.2/deployments/helm-chart
- https://github.com/nginxinc/kubernetes-ingress/tree/v3.1.0/deployments/helm-chart
type: application
version: 0.16.2
version: 0.17.0

View File

@ -8,50 +8,77 @@ This chart deploys the NGINX Ingress Controller in your Kubernetes cluster.
- A [Kubernetes Version Supported by the Ingress Controller](https://docs.nginx.com/nginx-ingress-controller/technical-specifications/#supported-kubernetes-versions)
- Helm 3.0+.
- Git.
- If youd like to use NGINX Plus:
- To pull from the F5 Container registry, configure a docker registry secret using your JWT token from the MyF5 portal by following the instructions from [here](https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret). Make sure to specify the secret using `controller.serviceAccount.imagePullSecretName` parameter.
- Alternatively, pull an Ingress Controller image with NGINX Plus and push it to your private registry by following the instructions from [here](https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image).
- Alternatively, you can build an Ingress Controller image with NGINX Plus and push it to your private registry by following the instructions from [here](https://docs.nginx.com/nginx-ingress-controller/installation/building-ingress-controller-image).
- Update the `controller.image.repository` field of the `values-plus.yaml` accordingly.
- If youd like to use App Protect DoS, please install App Protect DoS Arbitrator helm chart. Make sure to install in the same namespace as the NGINX Ingress Controller. Note that if you install multiple NGINX Ingress Controllers in the same namespace, they will need to share the same Arbitrator because it is not possible to install more than one Arbitrator in a single namespace.
- If youd like to use App Protect DoS, please install App Protect DoS Arbitrator [helm chart](https://github.com/nginxinc/nap-dos-arbitrator-helm-chart). Make sure to install in the same namespace as the NGINX Ingress Controller. Note that if you install multiple NGINX Ingress Controllers in the same namespace, they will need to share the same Arbitrator because it is not possible to install more than one Arbitrator in a single namespace.
## CRDs
By default, the Ingress Controller requires a number of custom resource definitions (CRDs) installed in the cluster. The Helm client will install those CRDs. If the CRDs are not installed, the Ingress Controller pods will not become `Ready`.
If you do not use the custom resources that require those CRDs (which corresponds to `controller.enableCustomResources`, `controller.appprotect.enable`, and `controller.appprotectdos.enable` all set to `false`), the installation of the CRDs can be skipped by specifying `--skip-crds` for the helm install command.
### Upgrading the CRDs
To upgrade the CRDs, pull the chart sources as described in [Pulling the Chart](#pulling-the-chart) and then run:
```console
$ kubectl apply -f crds/
```
> **Note**
>
> The following warning is expected and can be ignored: `Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply`.
>
> Make sure to check the [release notes](https://www.github.com/nginxinc/kubernetes-ingress/releases) of a new release for any special upgrade procedures.
### Uninstalling the CRDs
To remove the CRDs, pull the chart sources as described in [Pulling the Chart](#pulling-the-chart) and then run:
```console
$ kubectl delete -f crds/
```
> **Note**
>
> This command will delete all the corresponding custom resources in your cluster across all namespaces. Please ensure there are no custom resources that you want to keep and there are no other Ingress Controller releases running in the cluster.
## Managing the Chart via OCI Registry (edge version)
> **Warning**
>
> The `edge` version is not intended for production use. It is intended for testing and development purposes only.
### Installing the Chart
To install the chart with the release name my-release (my-release is the name that you choose):
For NGINX:
```console
$ helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.0.0-edge
```
For NGINX Plus: (assuming you have pushed the Ingress Controller image `nginx-plus-ingress` to your private registry `myregistry.example.com`)
```console
$ helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.0.0-edge --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true
```
This will install the latest `edge` version of the Ingress Controller from GitHub Container Registry. If you prefer to use Docker Hub, you can replace `ghcr.io/nginxinc/charts/nginx-ingress` with `registry-1.docker.io/nginxcharts/nginx-ingress`.
## Managing the Chart via Helm Repository
### Adding the Helm Repository
This step is required if you're installing the chart via the helm repository.
```console
$ helm repo add nginx-stable https://helm.nginx.com/stable
$ helm repo update
```
**Note**: If you want to use the experimental repository, replace `stable` with `edge`.
### Installing the Chart
To install the chart with the release name my-release (my-release is the name that you choose):
For NGINX:
```console
$ helm install my-release nginx-stable/nginx-ingress
```
For NGINX Plus: (assuming you have pushed the Ingress Controller image `nginx-plus-ingress` to your private registry `myregistry.example.com`)
```console
$ helm install my-release nginx-stable/nginx-ingress --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true
```
**Note**: If you want to use the experimental repository, replace `stable` with `edge` and add the `--devel` flag.
### Upgrading the Chart
Helm does not upgrade the CRDs during a release upgrade. Before you upgrade a release, see [Upgrading the CRDs](#upgrading-the-crds).
To upgrade the release `my-release`:
```console
$ helm upgrade my-release nginx-stable/nginx-ingress
```
### Uninstalling the Chart
To uninstall/delete the release `my-release`:
```console
$ helm uninstall my-release
```
The command removes all the Kubernetes components associated with the release and deletes the release.
Uninstalling the release does not remove the CRDs. To remove the CRDs, see [Uninstalling the CRDs](#uninstalling-the-crds).
## Managing the Chart via Sources
### Pulling the Chart
This step is required if you're installing the chart using its sources. Additionally, the step is also required for managing the custom resource definitions (CRDs), which the Ingress Controller requires by default, or for upgrading/deleting the CRDs.
1. Pull the chart sources:
```console
$ helm pull nginx-stable/nginx-ingress --untar --version 0.16.1
```
2. Change your working directory to nginx-ingress:
```console
$ cd nginx-ingress
```
### Installing the Chart
To install the chart with the release name my-release (my-release is the name that you choose):
For NGINX:
```console
$ helm install my-release .
```
For NGINX Plus:
```console
$ helm install my-release -f values-plus.yaml .
```
**Note**: If you want to use the experimental repository, replace the value in the `tag` field inside the yaml files with `edge`.
The command deploys the Ingress Controller in your Kubernetes cluster in the default configuration. The [Configuration](#configuration) section lists the parameters that can be configured during installation.
When deploying the Ingress Controller, make sure to use your own TLS certificate and key for the default server rather than the default pre-generated ones. Read the [Configuration](#configuration) section below to see how to configure a TLS certificate and key for the default server. Note that the default server returns the Not Found page with the 404 status code for all requests for domains for which there are no Ingress rules defined.
### Upgrading the Chart
Helm does not upgrade the CRDs during a release upgrade. Before you upgrade a release, see [Upgrading the CRDs](#upgrading-the-crds).
To upgrade the release `my-release`:
```console
$ helm upgrade my-release .
```
### Uninstalling the Chart
To uninstall/delete the release `my-release`:
```console
$ helm uninstall my-release
```
The command removes all the Kubernetes components associated with the release and deletes the release.
Uninstalling the release does not remove the CRDs. To remove the CRDs, see [Uninstalling the CRDs](#uninstalling-the-crds).
## Running Multiple Ingress Controllers
If you are running multiple Ingress Controller releases in your cluster with enabled custom resources, the releases will share a single version of the CRDs. As a result, make sure that the Ingress Controller versions match the version of the CRDs. Additionally, when uninstalling a release, ensure that you don't remove the CRDs until there are no other Ingress Controller releases running in the cluster.
See [running multiple Ingress Controllers](https://docs.nginx.com/nginx-ingress-controller/installation/running-multiple-ingress-controllers/) for more details.
## Configuration
The following table lists the configurable parameters of the NGINX Ingress Controller chart and their default values.
Parameter | Description | Default
--- | --- | ---
`controller.logLevel` | The log level of the Ingress Controller. | 1
`controller.image.digest` | The image digest of the Ingress Controller. | None
`controller.image.repository` | The image repository of the Ingress Controller. | nginx/nginx-ingress
`controller.image.tag` | The tag of the Ingress Controller image. | 3.1.0
`controller.image.pullPolicy` | The pull policy for the Ingress Controller image. | IfNotPresent
`controller.lifecycle` | The lifecycle of the Ingress Controller pods. | {}
`controller.customConfigMap` | The name of the custom ConfigMap used by the Ingress Controller. If set, then the default config is ignored. | ""
`controller.config.annotations` | The annotations of the Ingress Controller configmap. | {}
`controller.config.entries` | The entries of the ConfigMap for customizing NGINX configuration. See [ConfigMap resource docs](https://docs.nginx.com/nginx-ingress-controller/configuration/global-configuration/configmap-resource/) for the list of supported ConfigMap keys. | {}
`controller.customPorts` | A list of custom ports to expose on the NGINX Ingress Controller pod. Follows the conventional Kubernetes yaml syntax for container ports. | []
`controller.defaultTLS.cert` | The base64-encoded TLS certificate for the default HTTPS server. **Note:** By default, a pre-generated self-signed certificate is used. It is recommended that you specify your own certificate. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | A pre-generated self-signed certificate.
`controller.defaultTLS.key` | The base64-encoded TLS key for the default HTTPS server. **Note:** By default, a pre-generated key is used. It is recommended that you specify your own key. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | A pre-generated key.
`controller.defaultTLS.secret` | The secret with a TLS certificate and key for the default HTTPS server. The value must follow the following format: `<namespace>/<name>`. Used as an alternative to specifying a certificate and key using `controller.defaultTLS.cert` and `controller.defaultTLS.key` parameters. **Note:** Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | None
`controller.wildcardTLS.cert` | The base64-encoded TLS certificate for every Ingress/VirtualServer host that has TLS enabled but no secret specified. If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. | None
`controller.wildcardTLS.key` | The base64-encoded TLS key for every Ingress/VirtualServer host that has TLS enabled but no secret specified. If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. | None
`controller.tolerations` | The tolerations of the Ingress Controller pods. | []
`controller.affinity` | The affinity of the Ingress Controller pods. | {}
`controller.topologySpreadConstraints` | The topology spread constraints of the Ingress Controller pods. | {}
`controller.env` | The additional environment variables to be set on the Ingress Controller pods. | []
`controller.volumes` | The volumes of the Ingress Controller pods. | []
`controller.volumeMounts` | The volumeMounts of the Ingress Controller pods. | []
`controller.initContainers` | InitContainers for the Ingress Controller pods. | []
`controller.service.httpPort.targetPort` | The target port of the HTTP port of the Ingress Controller service. | 80
`controller.service.httpsPort.enable` | Enables the HTTPS port for the Ingress Controller service. | true
`controller.service.httpsPort.port` | The HTTPS port of the Ingress Controller service. | 443
`controller.service.httpsPort.nodePort` | The custom NodePort for the HTTPS port. Requires `controller.service.type` set to `NodePort`. | ""
`controller.service.httpsPort.targetPort` | The target port of the HTTPS port of the Ingress Controller service. | 443
`controller.serviceAccount.annotations` | The annotations of the Ingress Controller service account. | {}
`controller.serviceAccount.name` | The name of the service account of the Ingress Controller pods. Used for RBAC. | Autogenerated
`controller.autoscaling.maxReplicas` | Maximum number of replicas for the HPA. | 3
`controller.autoscaling.targetCPUUtilizationPercentage` | The target CPU utilization percentage. | 50
`controller.autoscaling.targetMemoryUtilizationPercentage` | The target memory utilization percentage. | 50
`controller.podDisruptionBudget.enabled` | Enables PodDisruptionBudget. | false
`controller.podDisruptionBudget.annotations` | The annotations of the Ingress Controller pod disruption budget. | {}
`controller.podDisruptionBudget.minAvailable` | The number of Ingress Controller pods that should be available. This is a mutually exclusive setting with "maxUnavailable". | 0
`controller.podDisruptionBudget.maxUnavailable` | The number of Ingress Controller pods that can be unavailable. This is a mutually exclusive setting with "minAvailable". | 0
`controller.strategy` | Specifies the strategy used to replace old Pods with new ones. Docs for [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) and [Daemonset update strategy](https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy). | {}
`controller.disableIPV6` | Disable IPV6 listeners explicitly for nodes that do not support the IPV6 stack. | false
`controller.readOnlyRootFilesystem` | Configure root filesystem as read-only and add volumes for temporary data. | false
`rbac.create` | Configures RBAC. | true
`prometheus.create` | Expose NGINX or NGINX Plus metrics in the Prometheus format. | true
`prometheus.port` | Configures the port to scrape the metrics. | 9113
`prometheus.scheme` | Configures the HTTP scheme to use for connections to the Prometheus endpoint. | http
`prometheus.secret` | The namespace / name of a Kubernetes TLS Secret. If specified, this secret is used to secure the Prometheus endpoint with TLS connections. | ""
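As an illustration, a small `values.yaml` overriding a few of the parameters above might look like this (the values themselves are examples only; every key is taken from the table):
```yaml
controller:
  image:
    repository: nginx/nginx-ingress
    tag: "3.1.0"
  logLevel: 2
  readOnlyRootFilesystem: true
prometheus:
  create: true
  port: 9113
```
Pass it to any of the install commands above with `-f values.yaml`.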
View File
@@ -39,12 +39,13 @@ spec:
          type: object
          properties:
            protocol:
              description: Protocol determines if the server listens on http1 / http2 / grpc / websocket. The default is http1.
              type: string
              enum:
                - http1
                - http2
                - grpc
                - websocket
            timeout:
              description: Timeout determines how long (in seconds) should NGINX App Protect DoS wait for a response. Default is 10 seconds for http1/http2 and 5 seconds for grpc.
              type: integer
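A hedged sketch of a protected resource using the new `websocket` value (the apiVersion, kind, and field names follow the App Protect DoS docs rather than this diff, and the host is a placeholder):
```yaml
apiVersion: appprotectdos.f5.com/v1beta1
kind: DosProtectedResource
metadata:
  name: dos-protected
spec:
  enable: true
  name: webapp                  # placeholder name for the protected object
  apDosMonitor:
    uri: "webapp.example.com"   # placeholder monitoring endpoint
    protocol: websocket         # the newly allowed enum value
    timeout: 5
```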
View File
@@ -92,6 +92,8 @@ spec:
          properties:
            clientCertSecret:
              type: string
            crlFileName:
              type: string
            verifyClient:
              type: string
            verifyDepth:
@@ -114,8 +116,14 @@ spec:
          description: OIDC defines an Open ID Connect policy.
          type: object
          properties:
            accessTokenEnable:
              type: boolean
            authEndpoint:
              type: string
            authExtraArgs:
              type: array
              items:
                type: string
            clientID:
              type: string
            clientSecret:
@@ -156,6 +164,8 @@ spec:
          description: WAF defines an WAF policy.
          type: object
          properties:
            apBundle:
              type: string
            apPolicy:
              type: string
            enable:
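A sketch of a Policy exercising the new OIDC fields (endpoints and names are placeholders; `tokenEndpoint` and `jwksURI` are pre-existing OIDC fields not visible in the excerpt above):
```yaml
apiVersion: k8s.nginx.org/v1
kind: Policy
metadata:
  name: oidc-policy
spec:
  oidc:
    clientID: my-client                       # placeholder IdP client ID
    clientSecret: oidc-secret                 # Secret holding the client secret
    authEndpoint: https://idp.example.com/authorize
    tokenEndpoint: https://idp.example.com/token
    jwksURI: https://idp.example.com/keys
    accessTokenEnable: true                   # new field from the CRD change above
    authExtraArgs:
      - prompt=consent                        # new field: extra query args for the auth endpoint
```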
View File
@@ -73,6 +73,12 @@ spec:
      type: string
    streamSnippets:
      type: string
    tls:
      description: TLS defines TLS configuration for a TransportServer.
      type: object
      properties:
        secret:
          type: string
    upstreamParameters:
      description: UpstreamParameters defines parameters for an upstream.
      type: object
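A minimal sketch of a TransportServer using the new `tls` field (listener, service, and secret names are placeholders; the listener must exist in the GlobalConfiguration):
```yaml
apiVersion: k8s.nginx.org/v1alpha1
kind: TransportServer
metadata:
  name: secure-tcp-app
spec:
  listener:
    name: tcp-listener           # placeholder; must match a GlobalConfiguration listener
    protocol: TCP
  tls:
    secret: tcp-app-tls-secret   # a kubernetes.io/tls Secret in the same namespace
  upstreams:
    - name: tcp-app
      service: tcp-app-svc
      port: 9000
  action:
    pass: tcp-app
```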
View File
@@ -515,6 +515,8 @@
                  type: string
                jitter:
                  type: string
                keepalive-time:
                  type: string
                mandatory:
                  type: boolean
                passes:
View File
@@ -597,6 +597,8 @@
                  type: string
                jitter:
                  type: string
                keepalive-time:
                  type: string
                mandatory:
                  type: boolean
                passes:
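Judging by the neighboring fields (`jitter`, `mandatory`, `passes`), the addition lands in the upstream `healthCheck` block of VirtualServer and VirtualServerRoute; a hedged sketch (host and service are placeholders, and active health checks require NGINX Plus):
```yaml
apiVersion: k8s.nginx.org/v1
kind: VirtualServer
metadata:
  name: webapp
spec:
  host: webapp.example.com
  upstreams:
    - name: webapp
      service: webapp-svc
      port: 80
      healthCheck:
        enable: true            # NGINX Plus only
        jitter: 3s
        keepalive-time: 60s     # the newly added field
        mandatory: true
        passes: 3
  routes:
    - path: /
      action:
        pass: webapp
```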
View File
@@ -4,18 +4,61 @@
Expand the name of the chart.
*/}}
{{- define "nginx-ingress.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "nginx-ingress.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create a default fully qualified controller name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "nginx-ingress.controller.fullname" -}}
{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "nginx-ingress.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "nginx-ingress.labels" -}}
helm.sh/chart: {{ include "nginx-ingress.chart" . }}
{{ include "nginx-ingress.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "nginx-ingress.selectorLabels" -}}
app.kubernetes.io/name: {{ include "nginx-ingress.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Expand the name of the configmap.
@@ -24,7 +67,7 @@ Expand the name of the configmap.
{{- if .Values.controller.customConfigMap -}}
{{ .Values.controller.customConfigMap }}
{{- else -}}
{{- default (include "nginx-ingress.fullname" .) .Values.controller.config.name -}}
{{- end -}}
{{- end -}}
@@ -35,7 +78,7 @@ Expand leader election lock name.
{{- if .Values.controller.reportIngressStatus.leaderElectionLockName -}}
{{ .Values.controller.reportIngressStatus.leaderElectionLockName }}
{{- else -}}
{{- printf "%s-%s" (include "nginx-ingress.fullname" .) "leader-election" -}}
{{- end -}}
{{- end -}}
@@ -43,42 +86,25 @@ Expand leader election lock name.
Expand service account name.
*/}}
{{- define "nginx-ingress.serviceAccountName" -}}
{{- default (include "nginx-ingress.fullname" .) .Values.controller.serviceAccount.name -}}
{{- end -}}

{{/*
Expand default TLS name.
*/}}
{{- define "nginx-ingress.defaultTLSName" -}}
{{- printf "%s-%s" (include "nginx-ingress.fullname" .) "default-server-tls" -}}
{{- end -}}

{{/*
Expand wildcard TLS name.
*/}}
{{- define "nginx-ingress.wildcardTLSName" -}}
{{- printf "%s-%s" (include "nginx-ingress.fullname" .) "wildcard-tls" -}}
{{- end -}}

{{/*
Expand image tag.
*/}}
{{- define "nginx-ingress.tag" -}}
{{- default .Chart.AppVersion .Values.controller.image.tag -}}
{{- end -}}
@@ -88,6 +114,6 @@ Expand image name.
{{- if .Values.controller.image.digest -}}
{{- printf "%s@%s" .Values.controller.image.repository .Values.controller.image.digest -}}
{{- else -}}
{{- printf "%s:%s" .Values.controller.image.repository (include "nginx-ingress.tag" .) -}}
{{- end -}}
{{- end -}}
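To illustrate the new naming helpers: a release named `my-release` yields resources prefixed `my-release-nginx-ingress`, while a release named `nginx-ingress` collapses to just `nginx-ingress` because the release name already contains the chart name. Both parts can be overridden from values (standard Helm scaffold keys, matching the templates above):
```yaml
nameOverride: ingress        # nginx-ingress.name resolves to "ingress"
fullnameOverride: edge-proxy # nginx-ingress.fullname resolves to "edge-proxy"
```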
View File
@@ -2,7 +2,7 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ include "nginx-ingress.controller.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "nginx-ingress.labels" . | nindent 4 }}
@@ -12,13 +12,13 @@ metadata:
spec:
  selector:
    matchLabels:
      {{- include "nginx-ingress.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "nginx-ingress.selectorLabels" . | nindent 8 }}
        {{- if .Values.nginxServiceMesh.enable }}
        nsm.nginx.com/daemonset: {{ include "nginx-ingress.controller.fullname" . }}
        spiffe.io/spiffeid: "true"
        {{- end }}
        {{- if .Values.controller.pod.extraLabels }}
@@ -42,6 +42,15 @@ spec:
    spec:
      serviceAccountName: {{ include "nginx-ingress.serviceAccountName" . }}
      automountServiceAccountToken: true
      securityContext:
        seccompProfile:
          type: RuntimeDefault
        {{- if .Values.controller.readOnlyRootFilesystem }}
        fsGroup: 101 #nginx
        {{- end }}
        sysctls:
          - name: "net.ipv4.ip_unprivileged_port_start"
            value: "0"
      terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
      {{- if .Values.controller.nodeSelector }}
      nodeSelector:
@@ -55,9 +64,19 @@ spec:
      affinity:
{{ toYaml .Values.controller.affinity | indent 8 }}
      {{- end }}
      {{- if or .Values.controller.readOnlyRootFilesystem .Values.nginxServiceMesh.enable .Values.controller.volumes }}
      volumes:
      {{- end }}
      {{- if .Values.controller.readOnlyRootFilesystem }}
      - name: nginx-etc
        emptyDir: {}
      - name: nginx-cache
        emptyDir: {}
      - name: nginx-lib
        emptyDir: {}
      - name: nginx-log
        emptyDir: {}
      {{- end }}
      {{- if .Values.nginxServiceMesh.enable }}
      - hostPath:
          path: /run/spire/sockets
@@ -109,17 +128,26 @@ spec:
            initialDelaySeconds: {{ .Values.controller.readyStatus.initialDelaySeconds }}
        {{- end }}
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: {{ .Values.controller.readOnlyRootFilesystem }}
          runAsUser: 101 #nginx
          runAsNonRoot: true
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        {{- if or .Values.controller.readOnlyRootFilesystem .Values.nginxServiceMesh.enable .Values.controller.volumeMounts }}
        volumeMounts:
        {{- end }}
        {{- if .Values.controller.readOnlyRootFilesystem }}
        - mountPath: /etc/nginx
          name: nginx-etc
        - mountPath: /var/cache/nginx
          name: nginx-cache
        - mountPath: /var/lib/nginx
          name: nginx-lib
        - mountPath: /var/log/nginx
          name: nginx-log
        {{- end }}
        {{- if .Values.nginxServiceMesh.enable }}
        - mountPath: /run/spire/sockets
          name: spire-agent-socket
@@ -136,6 +164,9 @@ spec:
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        {{- if .Values.controller.env }}
{{ toYaml .Values.controller.env | indent 8 }}
        {{- end }}
        {{- if .Values.nginxServiceMesh.enable }}
        - name: POD_SERVICEACCOUNT
          valueFrom:
@@ -190,11 +221,13 @@ spec:
          {{- else if .Values.controller.reportIngressStatus.externalService }}
          - -external-service={{ .Values.controller.reportIngressStatus.externalService }}
          {{- else if and (.Values.controller.service.create) (eq .Values.controller.service.type "LoadBalancer") }}
          - -external-service={{ include "nginx-ingress.controller.fullname" . }}
          {{- end }}
          {{- end }}
          - -enable-leader-election={{ .Values.controller.reportIngressStatus.enableLeaderElection }}
          {{- if .Values.controller.reportIngressStatus.enableLeaderElection }}
          - -leader-election-lock-name={{ include "nginx-ingress.leaderElectionName" . }}
          {{- end }}
          {{- end }}
          {{- if .Values.controller.wildcardTLS.secret }}
          - -wildcard-tls-secret={{ .Values.controller.wildcardTLS.secret }}
          {{- else if and .Values.controller.wildcardTLS.cert .Values.controller.wildcardTLS.key }}
@@ -230,8 +263,28 @@ spec:
      {{- if .Values.controller.extraContainers }}
      {{ toYaml .Values.controller.extraContainers | nindent 6 }}
      {{- end }}
      {{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.initContainers }}
      initContainers:
      {{- end }}
      {{- if .Values.controller.readOnlyRootFilesystem }}
      - name: init-{{ include "nginx-ingress.name" . }}
        image: {{ include "nginx-ingress.image" . }}
        imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}"
        command: ['cp', '-vdR', '/etc/nginx/.', '/mnt/etc']
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsUser: 101 #nginx
          runAsNonRoot: true
          capabilities:
            drop:
            - ALL
        volumeMounts:
        - mountPath: /mnt/etc
          name: nginx-etc
      {{- end }}
      {{- if .Values.controller.initContainers }}
{{ toYaml .Values.controller.initContainers | indent 6 }}
      {{- end }}
      {{- if .Values.controller.strategy }}
  updateStrategy:
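The hardened contexts above are driven by a single value; enabling the read-only root filesystem (which also adds the emptyDir volumes and the init container that seeds `/etc/nginx`) might look like this:
```console
$ helm upgrade my-release nginx-stable/nginx-ingress --set controller.readOnlyRootFilesystem=true
```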
View File
@@ -2,7 +2,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "nginx-ingress.controller.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "nginx-ingress.labels" . | nindent 4 }}
@@ -13,13 +13,13 @@ spec:
  replicas: {{ .Values.controller.replicaCount }}
  selector:
    matchLabels:
      {{- include "nginx-ingress.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "nginx-ingress.selectorLabels" . | nindent 8 }}
        {{- if .Values.nginxServiceMesh.enable }}
        nsm.nginx.com/deployment: {{ include "nginx-ingress.controller.fullname" . }}
        spiffe.io/spiffeid: "true"
        {{- end }}
        {{- if .Values.controller.pod.extraLabels }}
@@ -57,9 +57,19 @@ spec:
      topologySpreadConstraints:
{{ toYaml .Values.controller.topologySpreadConstraints | indent 8 }}
      {{- end }}
      {{- if or .Values.controller.readOnlyRootFilesystem .Values.nginxServiceMesh.enable .Values.controller.volumes }}
      volumes:
      {{- end }}
      {{- if .Values.controller.readOnlyRootFilesystem }}
      - name: nginx-etc
        emptyDir: {}
      - name: nginx-cache
        emptyDir: {}
      - name: nginx-lib
        emptyDir: {}
      - name: nginx-log
        emptyDir: {}
      {{- end }}
      {{- if .Values.nginxServiceMesh.enable }}
      - hostPath:
          path: /run/spire/sockets
@@ -74,6 +84,15 @@ spec:
      {{- end }}
      serviceAccountName: {{ include "nginx-ingress.serviceAccountName" . }}
      automountServiceAccountToken: true
      securityContext:
        seccompProfile:
          type: RuntimeDefault
        {{- if .Values.controller.readOnlyRootFilesystem }}
        fsGroup: 101 #nginx
        {{- end }}
        sysctls:
          - name: "net.ipv4.ip_unprivileged_port_start"
            value: "0"
      terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
      hostNetwork: {{ .Values.controller.hostNetwork }}
      dnsPolicy: {{ .Values.controller.dnsPolicy }}
@@ -90,9 +109,9 @@ spec:
          containerPort: 80
        - name: https
          containerPort: 443
        {{- if .Values.controller.customPorts }}
{{ toYaml .Values.controller.customPorts | indent 8 }}
        {{- end }}
        {{- if .Values.prometheus.create }}
        - name: prometheus
          containerPort: {{ .Values.prometheus.port }}
@@ -114,17 +133,26 @@ spec:
        resources:
{{ toYaml .Values.controller.resources | indent 10 }}
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: {{ .Values.controller.readOnlyRootFilesystem }}
          runAsUser: 101 #nginx
          runAsNonRoot: true
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        {{- if or .Values.controller.readOnlyRootFilesystem .Values.nginxServiceMesh.enable .Values.controller.volumeMounts }}
        volumeMounts:
        {{- end }}
        {{- if .Values.controller.readOnlyRootFilesystem }}
        - mountPath: /etc/nginx
          name: nginx-etc
        - mountPath: /var/cache/nginx
          name: nginx-cache
        - mountPath: /var/lib/nginx
          name: nginx-lib
        - mountPath: /var/log/nginx
          name: nginx-log
        {{- end }}
        {{- if .Values.nginxServiceMesh.enable }}
        - mountPath: /run/spire/sockets
          name: spire-agent-socket
@@ -141,6 +169,9 @@ spec:
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        {{- if .Values.controller.env }}
{{ toYaml .Values.controller.env | indent 8 }}
        {{- end }}
        {{- if .Values.nginxServiceMesh.enable }}
        - name: POD_SERVICEACCOUNT
          valueFrom:
@@ -193,9 +224,11 @@ spec:
          {{- else if .Values.controller.reportIngressStatus.externalService }}
          - -external-service={{ .Values.controller.reportIngressStatus.externalService }}
          {{- else if and (.Values.controller.service.create) (eq .Values.controller.service.type "LoadBalancer") }}
          - -external-service={{ include "nginx-ingress.controller.fullname" . }}
          {{- end }}
          {{- end }}
          - -enable-leader-election={{ .Values.controller.reportIngressStatus.enableLeaderElection }}
          {{- if .Values.controller.reportIngressStatus.enableLeaderElection }}
          - -leader-election-lock-name={{ include "nginx-ingress.leaderElectionName" . }}
          {{- end }}
          {{- if .Values.controller.wildcardTLS.secret }}
@@ -233,8 +266,28 @@ spec:
      {{- if .Values.controller.extraContainers }}
      {{ toYaml .Values.controller.extraContainers | nindent 6 }}
      {{- end }}
      {{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.initContainers }}
      initContainers:
      {{- end }}
      {{- if .Values.controller.readOnlyRootFilesystem }}
      - name: init-{{ include "nginx-ingress.name" . }}
        image: {{ include "nginx-ingress.image" . }}
        imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}"
        command: ['cp', '-vdR', '/etc/nginx/.', '/mnt/etc']
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsUser: 101 #nginx
          runAsNonRoot: true
          capabilities:
            drop:
            - ALL
        volumeMounts:
        - mountPath: /mnt/etc
          name: nginx-etc
      {{- end }}
      {{- if .Values.controller.initContainers }}
{{ toYaml .Values.controller.initContainers | indent 6 }}
      {{- end }}
      {{- if .Values.controller.strategy }}
  strategy:
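The deployment mirrors the daemonset changes; for example, the new `controller.env` hook renders extra environment variables into the container. A sketch of values using it (the variables are placeholders; entries follow the Kubernetes EnvVar schema):
```yaml
controller:
  env:
    - name: MY_FEATURE_FLAG    # placeholder variable
      value: "enabled"
    - name: POD_IP             # downward-API entries work as well
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
```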
View File
@@ -2,7 +2,7 @@
apiVersion: k8s.nginx.org/v1alpha1
kind: GlobalConfiguration
metadata:
  name: {{ include "nginx-ingress.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "nginx-ingress.labels" . | nindent 4 }}
View File
@@ -2,7 +2,7 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: {{ include "nginx-ingress.controller.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "nginx-ingress.labels" . | nindent 4 }}
@@ -14,7 +14,7 @@ spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ include "nginx-ingress.controller.fullname" . }}
  minReplicas: {{ .Values.controller.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }}
  metrics:
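Values driving this HPA might look like the following (the `enabled` flag is an assumption consistent with the chart's autoscaling parameters; the targets appear in the configuration table above):
```yaml
controller:
  autoscaling:
    enabled: true    # assumed flag that gates this template
    minReplicas: 2
    maxReplicas: 3
    targetCPUUtilizationPercentage: 50
    targetMemoryUtilizationPercentage: 50
```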
View File
@@ -2,6 +2,8 @@
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: {{ .Values.controller.ingressClass }}
  labels:
    {{- include "nginx-ingress.labels" . | nindent 4 }}
  {{- if .Values.controller.setAsDefaultIngress }}
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
View File
@@ -1,3 +1,4 @@
{{- if .Values.controller.reportIngressStatus.enableLeaderElection }}
apiVersion: v1
kind: ConfigMap
metadata:
@@ -5,7 +6,8 @@ metadata:
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "nginx-ingress.labels" . | nindent 4 }}
  {{- if .Values.controller.reportIngressStatus.annotations }}
  annotations:
{{ toYaml .Values.controller.reportIngressStatus.annotations | indent 4 }}
  {{- end }}
{{- end }}
View File
@@ -0,0 +1,23 @@
{{- if .Values.controller.podDisruptionBudget.enabled -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "nginx-ingress.controller.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "nginx-ingress.labels" . | nindent 4 }}
  {{- if .Values.controller.podDisruptionBudget.annotations }}
  annotations:
{{ toYaml .Values.controller.podDisruptionBudget.annotations | indent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      {{- include "nginx-ingress.selectorLabels" . | nindent 6 }}
  {{- if .Values.controller.podDisruptionBudget.minAvailable }}
  minAvailable: {{ .Values.controller.podDisruptionBudget.minAvailable }}
  {{- end }}
  {{- if .Values.controller.podDisruptionBudget.maxUnavailable }}
  maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
  {{- end }}
{{- end }}
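This template renders only when the budget is enabled; for example (keys from the configuration table above; remember `minAvailable` and `maxUnavailable` are mutually exclusive):
```yaml
controller:
  podDisruptionBudget:
    enabled: true
    minAvailable: 1   # keep at least one controller pod during voluntary disruptions
```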
Some files were not shown because too many files have changed in this diff.