Charts CI (#996)

pull/1000/head
Adam Pickering 2024-04-03 13:02:06 -06:00 committed by GitHub
parent a6875456ee
commit 4f970a9636
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
327 changed files with 9096 additions and 5714 deletions

BIN
assets/kuma/kuma-2.6.4.tgz Normal file

Binary file not shown.

View File

@ -1,7 +1,7 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: Bump argo-cd to v2.10.4
- kind: added
description: Add sizeLimit params on EmptyDir Volume
artifacthub.io/signKey: |
fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252
url: https://argoproj.github.io/argo-helm/pgp_keys.asc
@ -11,7 +11,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.23.0-0'
catalog.cattle.io/release-name: argo-cd
apiVersion: v2
appVersion: v2.10.4
appVersion: v2.10.5
dependencies:
- condition: redis-ha.enabled
name: redis-ha
@ -33,4 +33,4 @@ name: argo-cd
sources:
- https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
- https://github.com/argoproj/argo-cd
version: 6.7.3
version: 6.7.8

View File

@ -733,6 +733,7 @@ NAME: my-release
| controller.dnsConfig | object | `{}` | [DNS configuration] |
| controller.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for application controller pods |
| controller.dynamicClusterDistribution | bool | `false` | Enable dynamic cluster distribution (alpha) Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution |
| controller.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for application controller |
| controller.env | list | `[]` | Environment variables to pass to application controller |
| controller.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to application controller |
| controller.extraArgs | list | `[]` | Additional command line arguments to pass to application controller |
@ -827,6 +828,7 @@ NAME: my-release
| repoServer.deploymentStrategy | object | `{}` | Deployment strategy to be added to the repo server Deployment |
| repoServer.dnsConfig | object | `{}` | [DNS configuration] |
| repoServer.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Repo server pods |
| repoServer.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for repo server |
| repoServer.env | list | `[]` | Environment variables to pass to repo server |
| repoServer.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to repo server |
| repoServer.existingVolumes | object | `{}` | Volumes to be used in replacement of emptydir on default volumes |
@ -928,6 +930,8 @@ NAME: my-release
| server.certificateSecret.enabled | bool | `false` | Create argocd-server-tls secret |
| server.certificateSecret.key | string | `""` | Private Key of the certificate |
| server.certificateSecret.labels | object | `{}` | Labels to be added to argocd-server-tls secret |
| server.clusterRoleRules.enabled | bool | `false` | Enable custom rules for the server's ClusterRole resource |
| server.clusterRoleRules.rules | list | `[]` | List of custom rules for the server's ClusterRole resource |
| server.containerPorts.metrics | int | `8083` | Metrics container port |
| server.containerPorts.server | int | `8080` | Server container port |
| server.containerSecurityContext | object | See [values.yaml] | Server container-level security context |
@ -935,6 +939,7 @@ NAME: my-release
| server.deploymentStrategy | object | `{}` | Deployment strategy to be added to the server Deployment |
| server.dnsConfig | object | `{}` | [DNS configuration] |
| server.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Server pods |
| server.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for the Argo CD server |
| server.env | list | `[]` | Environment variables to pass to Argo CD server |
| server.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to Argo CD server |
| server.extensions.containerSecurityContext | object | See [values.yaml] | Server UI extensions container-level security context |
@ -1073,6 +1078,7 @@ NAME: my-release
| dex.deploymentStrategy | object | `{}` | Deployment strategy to be added to the Dex server Deployment |
| dex.dnsConfig | object | `{}` | [DNS configuration] |
| dex.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Dex server pods |
| dex.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for Dex server |
| dex.enabled | bool | `true` | Enable dex |
| dex.env | list | `[]` | Environment variables to pass to the Dex server |
| dex.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to the Dex server |
@ -1325,6 +1331,7 @@ If you want to use an existing Redis (eg. a managed service from a cloud provide
| applicationSet.deploymentStrategy | object | `{}` | Deployment strategy to be added to the ApplicationSet controller Deployment |
| applicationSet.dnsConfig | object | `{}` | [DNS configuration] |
| applicationSet.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for ApplicationSet controller pods |
| applicationSet.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for applicationSet controller |
| applicationSet.enabled | bool | `true` | Enable ApplicationSet controller |
| applicationSet.extraArgs | list | `[]` | ApplicationSet controller command line flags |
| applicationSet.extraContainers | list | `[]` | Additional containers to be added to the ApplicationSet controller pod |
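For reference, a minimal `values.yaml` sketch exercising the new `emptyDir.sizeLimit` parameters documented above; the sizes are illustrative placeholders, not recommendations:

```yaml
# Cap the scratch emptyDir volumes per component (example values)
controller:
  emptyDir:
    sizeLimit: 1Gi
repoServer:
  emptyDir:
    sizeLimit: 2Gi
server:
  emptyDir:
    sizeLimit: 500Mi
dex:
  emptyDir:
    sizeLimit: 200Mi
applicationSet:
  emptyDir:
    sizeLimit: 200Mi
```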

View File

@ -334,7 +334,13 @@ spec:
{{- toYaml . | nindent 6 }}
{{- end }}
- name: argocd-home
{{- if .Values.controller.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.controller.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: argocd-repo-server-tls
secret:
secretName: argocd-repo-server-tls
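With `controller.emptyDir.sizeLimit` set to e.g. `1Gi`, the branch added above should render the volume roughly as follows; left at the default empty string, the template keeps emitting the bare `emptyDir: {}`:

```yaml
volumes:
  - name: argocd-home
    emptyDir:
      sizeLimit: 1Gi
```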

View File

@ -333,7 +333,12 @@ spec:
{{- toYaml . | nindent 6 }}
{{- end }}
- name: argocd-home
{{- if .Values.controller.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.controller.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: argocd-repo-server-tls
secret:
secretName: argocd-repo-server-tls

View File

@ -302,9 +302,19 @@ spec:
configMap:
name: argocd-gpg-keys-cm
- name: gpg-keyring
{{- if .Values.applicationSet.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.applicationSet.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: tmp
{{- if .Values.applicationSet.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.applicationSet.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: argocd-repo-server-tls
secret:
secretName: argocd-repo-server-tls

View File

@ -28,6 +28,9 @@ rules:
verbs:
- list
- watch
{{- if (index .Values.configs.params "application.namespaces") }}
- create
{{- end }}
{{- if .Values.notifications.cm.create }}
- apiGroups:
- ""

View File

@ -394,26 +394,46 @@ spec:
{{- if .Values.repoServer.existingVolumes.helmWorkingDir -}}
{{ toYaml .Values.repoServer.existingVolumes.helmWorkingDir | nindent 8 }}
{{- else }}
{{- if .Values.repoServer.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
{{- end }}
- name: plugins
{{- if .Values.repoServer.existingVolumes.plugins -}}
{{ toYaml .Values.repoServer.existingVolumes.plugins | nindent 8 }}
{{- else }}
{{- if .Values.repoServer.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
- name: var-files
{{- if .Values.repoServer.existingVolumes.varFiles -}}
{{ toYaml .Values.repoServer.existingVolumes.varFiles | nindent 8 }}
{{- else }}
{{- if .Values.repoServer.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
- name: tmp
{{- if .Values.repoServer.existingVolumes.tmp -}}
{{ toYaml .Values.repoServer.existingVolumes.tmp | nindent 8 }}
{{- else }}
{{- if .Values.repoServer.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
- name: ssh-known-hosts
configMap:
@ -428,7 +448,12 @@ spec:
{{- if .Values.repoServer.existingVolumes.gpgKeyring -}}
{{ toYaml .Values.repoServer.existingVolumes.gpgKeyring | nindent 8 }}
{{- else }}
{{- if .Values.repoServer.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
- name: argocd-repo-server-tls
secret:

View File

@ -6,6 +6,9 @@ metadata:
labels:
{{- include "argo-cd.labels" (dict "context" . "component" .Values.server.name "name" .Values.server.name) | nindent 4 }}
rules:
{{- if .Values.server.clusterRoleRules.enabled }}
{{- toYaml .Values.server.clusterRoleRules.rules | nindent 2 }}
{{- else }}
- apiGroups:
- '*'
resources:
@ -48,6 +51,17 @@ rules:
- list
- update
- watch
{{- if (index .Values.configs.params "application.namespaces") }}
- apiGroups:
- "argoproj.io"
resources:
- "applications"
verbs:
- create
- delete
- update
- patch
{{- end }}
- apiGroups:
- batch
resources:
@ -62,4 +76,5 @@ rules:
verbs:
{{/* supports triggering workflows from UI */}}
- create
{{- end }}
{{- end }}
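When `server.clusterRoleRules.enabled` is true, the block above swaps the chart's default rules for whatever is supplied in `server.clusterRoleRules.rules`; a minimal sketch with an illustrative read-only rule:

```yaml
server:
  clusterRoleRules:
    enabled: true
    rules:
      - apiGroups: [""]
        resources: ["configmaps", "secrets"]
        verbs: ["get", "list", "watch"]
```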

View File

@ -442,12 +442,27 @@ spec:
{{- end }}
{{- if .Values.server.extensions.enabled }}
- name: extensions
{{- if .Values.server.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.server.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
- name: plugins-home
{{- if .Values.server.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.server.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: tmp
{{- if .Values.server.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.server.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: ssh-known-hosts
configMap:
name: argocd-ssh-known-hosts-cm

View File

@ -2381,8 +2381,6 @@ spec:
- metadata
- spec
type: object
required:
- elements
type: object
matrix:
properties:
@ -4725,8 +4723,6 @@ spec:
- metadata
- spec
type: object
required:
- elements
type: object
matrix:
x-kubernetes-preserve-unknown-fields: true
@ -9752,8 +9748,6 @@ spec:
- metadata
- spec
type: object
required:
- elements
type: object
matrix:
x-kubernetes-preserve-unknown-fields: true

View File

@ -187,9 +187,19 @@ spec:
{{- end }}
volumes:
- name: static-files
{{- if .Values.dex.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.dex.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: dexconfig
{{- if .Values.dex.emptyDir.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.dex.emptyDir.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end }}
- name: argocd-dex-server-tls
secret:
secretName: argocd-dex-server-tls

View File

@ -673,6 +673,13 @@ controller:
# - name: custom-tools
# emptyDir: {}
## Application controller emptyDir volumes
emptyDir:
# -- EmptyDir size limit for application controller
# @default -- `""` (defaults not set if not specified i.e. no size limit)
sizeLimit: ""
# sizeLimit: "1Gi"
# -- Annotations for the application controller StatefulSet
statefulsetAnnotations: {}
@ -989,6 +996,13 @@ dex:
# -- Additional volumes to the dex pod
volumes: []
## Dex server emptyDir volumes
emptyDir:
# -- EmptyDir size limit for Dex server
# @default -- `""` (defaults not set if not specified i.e. no size limit)
sizeLimit: ""
# sizeLimit: "1Gi"
# TLS certificate configuration via Secret
## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-dex-server
## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart Dex automatically without extra controllers.
@ -1744,6 +1758,13 @@ server:
# - name: custom-tools
# emptyDir: {}
## Argo CD server emptyDir volumes
emptyDir:
# -- EmptyDir size limit for the Argo CD server
# @default -- `""` (defaults not set if not specified i.e. no size limit)
sizeLimit: ""
# sizeLimit: "1Gi"
# -- Annotations to be added to server Deployment
deploymentAnnotations: {}
@ -2194,6 +2215,14 @@ server:
# -- Termination policy of Openshift Route
termination_policy: None
## Enable this and set `rules` to the custom rules you want for the ClusterRole resource.
## Defaults to off
clusterRoleRules:
# -- Enable custom rules for the server's ClusterRole resource
enabled: false
# -- List of custom rules for the server's ClusterRole resource
rules: []
## Repo Server
repoServer:
# -- Repo server name
@ -2358,6 +2387,13 @@ repoServer:
# persistentVolumeClaim:
# claimName: pvc-argocd-repo-server-plugins
## RepoServer emptyDir volumes
emptyDir:
# -- EmptyDir size limit for repo server
# @default -- `""` (defaults not set if not specified i.e. no size limit)
sizeLimit: ""
# sizeLimit: "1Gi"
# -- Toggle the usage of an ephemeral Helm working directory
useEphemeralHelmWorkingDir: true
@ -2645,6 +2681,13 @@ applicationSet:
# -- List of extra volumes to add
extraVolumes: []
## ApplicationSet controller emptyDir volumes
emptyDir:
# -- EmptyDir size limit for applicationSet controller
# @default -- `""` (defaults not set if not specified i.e. no size limit)
sizeLimit: ""
# sizeLimit: "1Gi"
## Metrics service configuration
metrics:
# -- Deploy metrics service

View File

@ -6,11 +6,11 @@ annotations:
category: Database
images: |
- name: cassandra
image: docker.io/bitnami/cassandra:4.1.4-debian-12-r4
image: docker.io/bitnami/cassandra:4.1.4-debian-12-r5
- name: cassandra-exporter
image: docker.io/bitnami/cassandra-exporter:2.3.8-debian-12-r17
image: docker.io/bitnami/cassandra-exporter:2.3.8-debian-12-r18
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r16
image: docker.io/bitnami/os-shell:12-debian-12-r17
licenses: Apache-2.0
apiVersion: v2
appVersion: 4.1.4
@ -35,4 +35,4 @@ maintainers:
name: cassandra
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/cassandra
version: 11.0.0
version: 11.0.1

View File

@ -82,7 +82,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/cassandra
tag: 4.1.4-debian-12-r4
tag: 4.1.4-debian-12-r5
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -678,7 +678,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r16
tag: 12-debian-12-r17
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -746,7 +746,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/cassandra-exporter
tag: 2.3.8-debian-12-r17
tag: 2.3.8-debian-12-r18
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -1,9 +1,9 @@
dependencies:
- name: zookeeper
repository: oci://registry-1.docker.io/bitnamicharts
version: 12.11.1
version: 13.0.1
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.18.0
digest: sha256:45e9e003da296d6f4d54e86584f77c90f91744427321717b4b7cb3873dd89ea0
generated: "2024-03-05T14:17:52.910919633+01:00"
version: 2.19.0
digest: sha256:7372949fd4cf0b3c5bd39e6c5024a59a880cd3f1a02ef6da1c8910c01e4b7e62
generated: "2024-03-25T18:32:15.845585148+01:00"

View File

@ -22,7 +22,7 @@ dependencies:
- condition: zookeeper.enabled
name: zookeeper
repository: file://./charts/zookeeper
version: 12.x.x
version: 13.x.x
- name: common
repository: file://./charts/common
tags:
@ -45,4 +45,4 @@ maintainers:
name: kafka
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/kafka
version: 27.1.2
version: 28.0.1

File diff suppressed because it is too large.

View File

@ -2,7 +2,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.18.0
appVersion: 2.19.0
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
@ -20,4 +20,4 @@ name: common
sources:
- https://github.com/bitnami/charts
type: library
version: 2.18.0
version: 2.19.0

View File

@ -28,6 +28,10 @@ Usage:
{{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}}
{{/* Remove incompatible user/group values that do not work in Openshift out of the box */}}
{{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}}
{{- if not .secContext.seLinuxOptions -}}
{{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}}
{{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -1,6 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.16.1
digest: sha256:f808a6fdc9c374d158ad7ff2f2c53a6c409e41da778d768b232dd20f86ef8b47
generated: "2024-02-21T11:56:37.618424604Z"
version: 2.19.0
digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc
generated: "2024-03-12T14:54:51.594358116Z"

View File

@ -4,10 +4,10 @@ annotations:
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r16
- name: zookeeper
image: docker.io/bitnami/zookeeper:3.9.1-debian-12-r15
image: docker.io/bitnami/zookeeper:3.9.2-debian-12-r0
licenses: Apache-2.0
apiVersion: v2
appVersion: 3.9.1
appVersion: 3.9.2
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
@ -26,4 +26,4 @@ maintainers:
name: zookeeper
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/zookeeper
version: 12.11.1
version: 13.0.1

View File

@ -42,25 +42,105 @@ These commands deploy ZooKeeper on the Kubernetes cluster in the default configu
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
## Configuration and installation details
To uninstall/delete the `my-release` deployment:
### Resource requests and limits
```console
helm delete my-release
Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case.
To make this process easier, the chart contains the `resourcesPreset` value, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
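A hedged example of setting explicit requests and limits instead of relying on a preset; the figures are placeholders to adapt to your workload:

```yaml
resourcesPreset: "none"
resources:
  requests:
    cpu: 250m
    memory: 512Mi
  limits:
    cpu: "1"
    memory: 1Gi
```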
### [Rolling vs Immutable tags](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Configure log level
You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs.
In order to remove that log noise so levels can be set to 'INFO', two changes must be made.
First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`.
Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes:
```yaml
livenessProbe:
enabled: false
readinessProbe:
enabled: false
customLivenessProbe:
exec:
command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
customReadinessProbe:
exec:
command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`, which is written to `conf/log4j.properties` as `zookeeper.root.logger` and defaults to
```console
zookeeper.root.logger=INFO, CONSOLE
```
The available appenders are:
- CONSOLE
- ROLLINGFILE
- RFAAUDIT
- TRACEFILE
## Persistence
The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
### Adjust permissions of persistent volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
### Configure the data log directory
You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (ZooKeeper's default behavior).
When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
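For example, a values excerpt that writes transaction logs to a dedicated path and persists them (the path is illustrative):

```yaml
dataLogDir: /bitnami/zookeeper/dataLog
persistence:
  enabled: true
```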
### Set pod affinity
This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
## Parameters
### Global parameters
| Name | Description | Value |
| ------------------------- | ----------------------------------------------- | ----- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| Name | Description | Value |
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` |
### Common parameters
@ -157,7 +237,7 @@ The command removes all the Kubernetes components associated with the chart and
| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `none` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` |
| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
@ -165,12 +245,12 @@ The command removes all the Kubernetes components associated with the chart and
| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` |
| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
@ -269,10 +349,10 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `none` |
| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `nano` |
| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `volumePermissions.containerSecurityContext.enabled` | Enabled init container Security Context | `true` |
| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
### Metrics parameters
@ -329,7 +409,7 @@ The command removes all the Kubernetes components associated with the chart and
| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""` |
| `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` |
| `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` |
| `tls.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if tls.resources is set (tls.resources is recommended for production). | `none` |
| `tls.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if tls.resources is set (tls.resources is recommended for production). | `nano` |
| `tls.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
@ -355,101 +435,23 @@ helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/zooke
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper/values.yaml)
## Configuration and installation details
### Resource requests and limits
Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case.
To make this process easier, the chart contains the `resourcesPreset` value, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
### [Rolling vs Immutable tags](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Configure log level
You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs.
In order to remove that log noise so levels can be set to 'INFO', two changes must be made.
First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`.
Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes:
```yaml
livenessProbe:
enabled: false
readinessProbe:
enabled: false
customLivenessProbe:
exec:
command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
customReadinessProbe:
exec:
command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
```
You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`, which is written to `conf/log4j.properties` as `zookeeper.root.logger` and defaults to
```console
zookeeper.root.logger=INFO, CONSOLE
```
The available appenders are:
- CONSOLE
- ROLLINGFILE
- RFAAUDIT
- TRACEFILE
## Persistence
The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
### Adjust permissions of persistent volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
### Configure the data log directory
You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (ZooKeeper's default behavior).
When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
### Set pod affinity
This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
### To 13.0.0
This major bump changes the following security defaults:
- `runAsGroup` is changed from `0` to `1001`
- `readOnlyRootFilesystem` is set to `true`
- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage; use `resources` adapted to your use case instead).
- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`.
This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones.
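If you need the previous behaviour while you adapt, a sketch of values restoring the pre-13.0.0 defaults listed above:

```yaml
global:
  compatibility:
    openshift:
      adaptSecurityContext: disabled
containerSecurityContext:
  runAsGroup: 0
  readOnlyRootFilesystem: false
resourcesPreset: "none"
```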
### To 12.0.0
This new version of the chart includes the new ZooKeeper major version 3.9.x. For more information, please refer to [Zookeeper 3.9.0 Release Notes](https://zookeeper.apache.org/doc/r3.9.0/releasenotes.html)
@ -550,4 +552,4 @@ Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
limitations under the License.

View File

@ -2,7 +2,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.16.1
appVersion: 2.19.0
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
@ -20,4 +20,4 @@ name: common
sources:
- https://github.com/bitnami/charts
type: library
version: 2.16.1
version: 2.19.0

View File

@ -0,0 +1,39 @@
{{/*
Copyright VMware, Inc.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return true if the detected platform is Openshift
Usage:
{{- include "common.compatibility.isOpenshift" . -}}
*/}}
{{- define "common.compatibility.isOpenshift" -}}
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" -}}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC
Usage:
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) -}}
*/}}
{{- define "common.compatibility.renderSecurityContext" -}}
{{- $adaptedContext := .secContext -}}
{{- if .context.Values.global.compatibility -}}
{{- if .context.Values.global.compatibility.openshift -}}
{{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}}
{{/* Remove incompatible user/group values that do not work in Openshift out of the box */}}
{{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}}
{{- if not .secContext.seLinuxOptions -}}
{{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}}
{{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- omit $adaptedContext "enabled" | toYaml -}}
{{- end -}}
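Charts call the helper with their securityContext values plus the root context; a hedged sketch of the call site and of what it might render on an OpenShift cluster with `adaptSecurityContext: auto`, where `runAsUser`/`runAsGroup`/`fsGroup` and an empty `seLinuxOptions` are dropped:

```yaml
# call site (as used in the statefulset template below)
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}

# possible rendered output on OpenShift (illustrative)
securityContext:
  runAsNonRoot: true
  privileged: false
  readOnlyRootFilesystem: true
  allowPrivilegeEscalation: false
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: RuntimeDefault
```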

View File

@ -74,7 +74,7 @@ spec:
schedulerName: {{ .Values.schedulerName }}
{{- end }}
{{- if .Values.podSecurityContext.enabled }}
securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.dnsPolicy }}
dnsPolicy: {{ .Values.dnsPolicy }}
@ -101,7 +101,7 @@ spec:
find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}
{{- end }}
{{- if .Values.volumePermissions.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }}
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.volumePermissions.containerSecurityContext "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.volumePermissions.resources }}
resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
@ -124,7 +124,7 @@ spec:
image: {{ include "zookeeper.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
{{- end }}
command:
- /scripts/init-certs.sh
@ -191,7 +191,7 @@ spec:
image: {{ template "zookeeper.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
{{- if .Values.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }}
{{- end }}
{{- if .Values.diagnosticMode.enabled }}
command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}

View File

@ -19,6 +19,15 @@ global:
##
imagePullSecrets: []
storageClass: ""
## Compatibility adaptations for Kubernetes platforms
##
compatibility:
## Compatibility adaptations for Openshift
##
openshift:
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
##
adaptSecurityContext: auto
## @section Common parameters
##
@ -76,7 +85,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/zookeeper
tag: 3.9.1-debian-12-r15
tag: 3.9.2-debian-12-r0
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -312,7 +321,7 @@ lifecycleHooks: {}
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "micro"
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -353,12 +362,12 @@ podSecurityContext:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 0
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
@ -767,7 +776,7 @@ volumePermissions:
## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "nano"
## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -788,7 +797,7 @@ volumePermissions:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 0
## @section Metrics parameters
##
@ -974,7 +983,7 @@ tls:
## @param tls.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if tls.resources is set (tls.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "nano"
## @param tls.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:

View File

@ -8,6 +8,7 @@ SPDX-License-Identifier: APACHE-2.0
{{- $replicaCount := .Values.broker.replicaCount | int }}
{{- range $i := until $replicaCount }}
{{- $targetPod := printf "%s-broker-%d" (printf "%s" $fullname) $i }}
{{- $_ := set $ "targetPod" $targetPod }}
apiVersion: v1
kind: Service
metadata:

View File

@ -9,6 +9,7 @@ SPDX-License-Identifier: APACHE-2.0
{{- $replicaCount := .Values.controller.replicaCount | int }}
{{- range $i := until $replicaCount }}
{{- $targetPod := printf "%s-controller-%d" $fullname $i }}
{{- $_ := set $ "targetPod" $targetPod }}
apiVersion: v1
kind: Service
metadata:
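Both external-access Service templates now copy the per-replica pod name onto the root context via Sprig's `set`, presumably so values rendered later in the same loop iteration with the root context (annotations, load balancer names, and similar) can reference it. A minimal sketch of the pattern with hypothetical names:

```yaml
{{- range $i := until 3 }}
{{- $targetPod := printf "my-kafka-broker-%d" $i }}
{{- $_ := set $ "targetPod" $targetPod }}  # $.targetPod is now readable wherever the root context is passed
# e.g. a user-supplied annotation template rendered with the root context:
#   external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com"
{{- end }}
```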

View File

@ -1,23 +0,0 @@
{{- /*
Copyright VMware, Inc.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
kind: NetworkPolicy
apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
metadata:
name: {{ printf "%s-egress" (include "common.names.fullname" .) }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
policyTypes:
- Egress
egress:
{{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }}
{{- end }}

View File

@ -1,53 +0,0 @@
{{- /*
Copyright VMware, Inc.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
metadata:
name: {{ printf "%s-ingress" (include "common.names.fullname" .) }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
policyTypes:
- Ingress
ingress:
# Allow client connections
- ports:
- port: {{ .Values.listeners.client.containerPort }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels:
{{ template "common.names.fullname" . }}-client: "true"
{{- if .Values.networkPolicy.explicitNamespacesSelector }}
namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }}
{{- end }}
{{- end }}
# Allow communication inter-broker
- ports:
- port: {{ .Values.listeners.interbroker.containerPort }}
from:
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
# Allow External connection
{{- if .Values.externalAccess.enabled }}
- ports:
- port: {{ .Values.listeners.external.containerPort }}
{{- if .Values.networkPolicy.externalAccess.from }}
from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.metrics.kafka.enabled }}
# Allow prometheus scrapes
- ports:
- port: {{ .Values.metrics.kafka.containerPorts.metrics }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,86 @@
{{- /*
Copyright VMware, Inc.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
metadata:
name: {{ include "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
policyTypes:
- Ingress
- Egress
{{- if .Values.networkPolicy.allowExternalEgress }}
egress:
- {}
{{- else }}
egress:
# Allow dns resolution
- ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
# Allow internal communications between nodes
- ports:
- port: {{ .Values.listeners.client.containerPort }}
- port: {{ .Values.listeners.interbroker.containerPort }}
{{- if .Values.externalAccess.enabled }}
- port: {{ .Values.listeners.external.containerPort }}
{{- end }}
to:
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
{{- if .Values.networkPolicy.extraEgress }}
{{- include "common.tplvalues.render" ( dict "value" .Values.rts.networkPolicy.extraEgress "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
ingress:
# Allow client connections
- ports:
- port: {{ .Values.listeners.client.containerPort }}
- port: {{ .Values.listeners.interbroker.containerPort }}
{{- if .Values.externalAccess.enabled }}
- port: {{ .Values.listeners.external.containerPort }}
{{- end }}
{{- if .Values.metrics.kafka.enabled }}
- port: {{ .Values.metrics.kafka.containerPorts.metrics }}
{{- end }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
- podSelector:
matchLabels:
{{ include "common.names.fullname" . }}-client: "true"
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }}
{{- if .Values.networkPolicy.ingressNSMatchLabels }}
- namespaceSelector:
matchLabels:
{{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }}
{{ $key | quote }}: {{ $value | quote }}
{{- end }}
{{- if .Values.networkPolicy.ingressNSPodMatchLabels }}
podSelector:
matchLabels:
{{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }}
{{ $key | quote }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.networkPolicy.extraIngress }}
{{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
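A hedged sketch of the values driving the rewritten policy; the keys come from the template above, while the label values are illustrative:

```yaml
networkPolicy:
  enabled: true
  # when false, only the selectors below (chart pods, "<fullname>-client: true" pods,
  # allowed namespaces) may reach the listener ports
  allowExternal: false
  # when false, egress is limited to DNS plus the client/inter-broker/external listener ports
  allowExternalEgress: false
  ingressNSMatchLabels:
    kubernetes.io/metadata.name: monitoring
  ingressNSPodMatchLabels:
    app.kubernetes.io/name: prometheus
  extraIngress: []
  extraEgress: []
```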

View File

@ -27,7 +27,7 @@ global:
openshift:
## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
##
adaptSecurityContext: disabled
adaptSecurityContext: auto
## @section Common parameters
##
@ -599,7 +599,7 @@ controller:
## @param controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "small"
## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -632,7 +632,9 @@ controller:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param controller.containerSecurityContext.enabled Enable Kafka containers' Security Context
## @param controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param controller.containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser
## @param controller.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param controller.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param controller.containerSecurityContext.runAsGroup Set Kafka containers' Security Context runAsGroup
## @param controller.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot
## @param controller.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged
## @param controller.containerSecurityContext.readOnlyRootFilesystem Allows the pod to mount the RootFS as ReadOnly only
@ -646,8 +648,9 @@ controller:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@ -1007,7 +1010,7 @@ broker:
## @param broker.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "small"
## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -1040,7 +1043,8 @@ broker:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param broker.containerSecurityContext.enabled Enable Kafka containers' Security Context
## @param broker.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param broker.containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser
## @param broker.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param broker.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param broker.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot
## @param broker.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged
## @param broker.containerSecurityContext.readOnlyRootFilesystem Allows the pod to mount the RootFS as ReadOnly only
@ -1054,8 +1058,9 @@ broker:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@ -1413,7 +1418,7 @@ externalAccess:
## @param externalAccess.autoDiscovery.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if externalAccess.autoDiscovery.resources is set (externalAccess.autoDiscovery.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "nano"
## @param externalAccess.autoDiscovery.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -1429,7 +1434,8 @@ externalAccess:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param externalAccess.autoDiscovery.containerSecurityContext.enabled Enable Kafka auto-discovery containers' Security Context
## @param externalAccess.autoDiscovery.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param externalAccess.autoDiscovery.containerSecurityContext.runAsUser Set Kafka auto-discovery containers' Security Context runAsUser
## @param externalAccess.autoDiscovery.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param externalAccess.autoDiscovery.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param externalAccess.autoDiscovery.containerSecurityContext.runAsNonRoot Set Kafka auto-discovery containers' Security Context runAsNonRoot
## @param externalAccess.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation Set Kafka auto-discovery containers' Security Context allowPrivilegeEscalation
## @param externalAccess.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem Set Kafka auto-discovery containers' Security Context readOnlyRootFilesystem
@ -1444,8 +1450,9 @@ externalAccess:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@ -1627,47 +1634,53 @@ externalAccess:
networkPolicy:
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: false
enabled: true
## @param networkPolicy.allowExternal Don't require client label for connections
## When set to false, only pods with the correct client label will have network access to the port Kafka is
## When set to false, only pods with the correct client label will have network access to the port Redis® is
## listening on. When true, zookeeper accept connections from any source (with the correct destination port).
##
allowExternal: true
## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
## and that match other criteria, the ones that have the good label, can reach the kafka.
## But sometimes, we want the kafka to be accessible to clients from other namespaces, in this case, we can use this
## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
##
allowExternalEgress: true
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolice
## e.g:
## explicitNamespacesSelector:
## matchLabels:
## role: frontend
## matchExpressions:
## - {key: role, operator: In, values: [frontend]}
##
explicitNamespacesSelector: {}
## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## - ipBlock:
## cidr: 172.9.0.0/16
## except:
## - 172.9.1.0/24
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
externalAccess:
from: []
## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
extraEgress: []
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
egressRules:
## Additional custom egress rules
## e.g:
## customRules:
## - to:
## - namespaceSelector:
## matchLabels:
## label: example
##
customRules: []
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## @section Volume Permissions parameters
##
@ -1704,7 +1717,7 @@ volumePermissions:
## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "nano"
## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -1723,7 +1736,7 @@ volumePermissions:
## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
##
containerSecurityContext:
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 0
## @section Other Parameters
##
@ -1885,7 +1898,7 @@ metrics:
## @param metrics.kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.kafka.resources is set (metrics.kafka.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "micro"
## @param metrics.kafka.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -1918,7 +1931,8 @@ metrics:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context
## @param metrics.kafka.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser
## @param metrics.kafka.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param metrics.kafka.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot
## @param metrics.kafka.containerSecurityContext.allowPrivilegeEscalation Set Kafka exporter containers' Security Context allowPrivilegeEscalation
## @param metrics.kafka.containerSecurityContext.readOnlyRootFilesystem Set Kafka exporter containers' Security Context readOnlyRootFilesystem
@ -1932,8 +1946,9 @@ metrics:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@ -2124,7 +2139,8 @@ metrics:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context
## @param metrics.jmx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser
## @param metrics.jmx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param metrics.jmx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot
## @param metrics.jmx.containerSecurityContext.allowPrivilegeEscalation Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation
## @param metrics.jmx.containerSecurityContext.readOnlyRootFilesystem Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem
@ -2138,8 +2154,9 @@ metrics:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
@ -2154,7 +2171,7 @@ metrics:
## @param metrics.jmx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "micro"
## @param metrics.jmx.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -2439,7 +2456,7 @@ provisioning:
## @param provisioning.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "micro"
## @param provisioning.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -2472,7 +2489,8 @@ provisioning:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context
## @param provisioning.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser
## @param provisioning.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param provisioning.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot
## @param provisioning.containerSecurityContext.allowPrivilegeEscalation Set Kafka provisioning containers' Security Context allowPrivilegeEscalation
## @param provisioning.containerSecurityContext.readOnlyRootFilesystem Set Kafka provisioning containers' Security Context readOnlyRootFilesystem
@ -2486,8 +2504,9 @@ provisioning:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true

View File

@ -1,6 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.19.0
digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc
generated: "2024-03-08T11:25:32.224991562+01:00"
version: 2.19.1
digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5
generated: "2024-04-02T11:21:12.855408532Z"

View File

@ -6,14 +6,14 @@ annotations:
category: Database
images: |
- name: mariadb
image: docker.io/bitnami/mariadb:11.2.3-debian-12-r4
image: docker.io/bitnami/mariadb:11.3.2-debian-12-r1
- name: mysqld-exporter
image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r8
image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r10
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r16
image: docker.io/bitnami/os-shell:12-debian-12-r18
licenses: Apache-2.0
apiVersion: v2
appVersion: 11.2.3
appVersion: 11.3.2
dependencies:
- name: common
repository: file://./charts/common
@ -37,4 +37,4 @@ maintainers:
name: mariadb
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/mariadb
version: 17.0.1
version: 18.0.1

View File

@ -2,7 +2,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.19.0
appVersion: 2.19.1
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
@ -20,4 +20,4 @@ name: common
sources:
- https://github.com/bitnami/charts
type: library
version: 2.19.0
version: 2.19.1

View File

@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production
{{ include "common.resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "common.resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi")
)
"xlarge" (dict
"requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi")
)
"2xlarge" (dict
"requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi")
)
}}
@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
{{- end -}}
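The helper above only builds the preset table; consuming charts are expected to pass their `*.resourcesPreset` value through it when no explicit `*.resources` block is set. A sketch of that call pattern, assuming a `controller` section like the one changed in this commit (the exact wiring lives in each chart's templates, not in this library file):

```yaml
{{- /* Sketch only: typical consumer of common.resources.preset */}}
{{- if .Values.controller.resources }}
          resources: {{- toYaml .Values.controller.resources | nindent 12 }}
{{- else if ne .Values.controller.resourcesPreset "none" }}
          resources: {{- include "common.resources.preset" (dict "type" .Values.controller.resourcesPreset) | nindent 12 }}
{{- end }}
```

With that wiring, the `resourcesPreset: "small"` defaults introduced in this commit resolve to the `small` entry of the dict above, and an explicit `resources` block still takes precedence.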

View File

@ -95,7 +95,7 @@ serviceBindings:
image:
registry: docker.io
repository: bitnami/mariadb
tag: 11.2.3-debian-12-r4
tag: 11.3.2-debian-12-r1
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1053,7 +1053,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r16
tag: 12-debian-12-r18
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
@ -1097,7 +1097,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.15.1-debian-12-r8
tag: 0.15.1-debian-12-r10
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
@ -1321,8 +1321,8 @@ networkPolicy:
##
enabled: true
## @param networkPolicy.allowExternal The Policy model to apply
## When set to false, only pods with the correct client label will have network access to the ports Keycloak is
## listening on. When true, Keycloak will accept connections from any source (with the correct destination port).
## When set to false, only pods with the correct client label will have network access to the ports MariaDB is
## listening on. When true, MariaDB will accept connections from any source (with the correct destination port).
##
allowExternal: true
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
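As a side note on `allowExternal` a few lines up: with bitnami-style network policies, setting it to `false` usually means only pods carrying the release-specific client label can reach the database port. A hedged sketch, assuming the `<fullname>-client: "true"` label convention and a placeholder release name `my-release`:

```yaml
# Sketch of a client pod admitted when networkPolicy.allowExternal=false.
# The exact label key is derived from the chart's fullname template (assumption).
apiVersion: v1
kind: Pod
metadata:
  name: mariadb-client
  labels:
    my-release-mariadb-client: "true"
spec:
  containers:
    - name: client
      image: docker.io/bitnami/mariadb:11.3.2-debian-12-r1
      command: ["sleep", "infinity"]
```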

View File

@ -1,6 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.19.0
digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc
generated: "2024-03-08T11:23:56.170052821+01:00"
version: 2.19.1
digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5
generated: "2024-04-02T10:58:49.170367596Z"

View File

@ -6,11 +6,11 @@ annotations:
category: Database
images: |
- name: mysql
image: docker.io/bitnami/mysql:8.0.36-debian-12-r8
image: docker.io/bitnami/mysql:8.0.36-debian-12-r10
- name: mysqld-exporter
image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r8
image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r10
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r16
image: docker.io/bitnami/os-shell:12-debian-12-r18
licenses: Apache-2.0
apiVersion: v2
appVersion: 8.0.36
@ -36,4 +36,4 @@ maintainers:
name: mysql
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/mysql
version: 10.1.0
version: 10.1.1

View File

@ -2,7 +2,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.19.0
appVersion: 2.19.1
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
@ -20,4 +20,4 @@ name: common
sources:
- https://github.com/bitnami/charts
type: library
version: 2.19.0
version: 2.19.1

View File

@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production
{{ include "common.resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "common.resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi")
)
"xlarge" (dict
"requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi")
)
"2xlarge" (dict
"requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi")
)
}}
@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -90,7 +90,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/mysql
tag: 8.0.36-debian-12-r8
tag: 8.0.36-debian-12-r10
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1063,8 +1063,8 @@ networkPolicy:
##
enabled: true
## @param networkPolicy.allowExternal The Policy model to apply
## When set to false, only pods with the correct client label will have network access to the ports Keycloak is
## listening on. When true, Keycloak will accept connections from any source (with the correct destination port).
## When set to false, only pods with the correct client label will have network access to the ports MySQL is
## listening on. When true, MySQL will accept connections from any source (with the correct destination port).
##
allowExternal: true
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
@ -1129,7 +1129,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r16
tag: 12-debian-12-r18
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -1174,7 +1174,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.15.1-debian-12-r8
tag: 0.15.1-debian-12-r10
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -1,6 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.19.0
digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc
generated: "2024-03-11T20:27:44.112846437Z"
version: 2.19.1
digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5
generated: "2024-04-02T18:43:43.860368523Z"

View File

@ -6,11 +6,11 @@ annotations:
category: Database
images: |
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r17
image: docker.io/bitnami/os-shell:12-debian-12-r18
- name: postgres-exporter
image: docker.io/bitnami/postgres-exporter:0.15.0-debian-12-r14
image: docker.io/bitnami/postgres-exporter:0.15.0-debian-12-r15
- name: postgresql
image: docker.io/bitnami/postgresql:16.2.0-debian-12-r10
image: docker.io/bitnami/postgresql:16.2.0-debian-12-r12
licenses: Apache-2.0
apiVersion: v2
appVersion: 16.2.0
@ -38,4 +38,4 @@ maintainers:
name: postgresql
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/postgresql
version: 15.1.4
version: 15.2.2

View File

@ -478,6 +478,7 @@ If you already have data in it, you will fail to sync to standby nodes for all c
| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `primary.service.headless.annotations` | Additional custom annotations for headless PostgreSQL primary service | `{}` |
| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` |
| `primary.persistence.volumeName` | Name to assign the volume | `data` |
| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` |
| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` |

View File

@ -2,7 +2,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.19.0
appVersion: 2.19.1
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
@ -20,4 +20,4 @@ name: common
sources:
- https://github.com/bitnami/charts
type: library
version: 2.19.0
version: 2.19.1

View File

@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production
{{ include "common.resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "common.resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi")
)
"xlarge" (dict
"requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi")
)
"2xlarge" (dict
"requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi")
)
}}
@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -162,7 +162,7 @@ spec:
- name: empty-dir
mountPath: /tmp
subPath: tmp-dir
- name: data
- name: {{ .Values.primary.persistence.volumeName }}
mountPath: {{ .Values.primary.persistence.mountPath }}
{{- if .Values.primary.persistence.subPath }}
subPath: {{ .Values.primary.persistence.subPath }}
@ -494,13 +494,11 @@ spec:
- name: dshm
mountPath: /dev/shm
{{- end }}
{{- if .Values.primary.persistence.enabled }}
- name: data
- name: {{ .Values.primary.persistence.volumeName }}
mountPath: {{ .Values.primary.persistence.mountPath }}
{{- if .Values.primary.persistence.subPath }}
subPath: {{ .Values.primary.persistence.subPath }}
{{- end }}
{{- end }}
{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
- name: postgresql-config
mountPath: {{ .Values.primary.persistence.mountPath }}/conf
@ -650,11 +648,11 @@ spec:
{{- end }}
{{- end }}
{{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }}
- name: data
- name: {{ .Values.primary.persistence.volumeName }}
persistentVolumeClaim:
claimName: {{ tpl .Values.primary.persistence.existingClaim $ }}
{{- else if not .Values.primary.persistence.enabled }}
- name: data
- name: {{ .Values.primary.persistence.volumeName }}
emptyDir: {}
{{- else }}
{{- if .Values.primary.persistentVolumeClaimRetentionPolicy.enabled }}
@ -666,7 +664,7 @@ spec:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
name: {{ .Values.primary.persistence.volumeName }}
{{- if .Values.primary.persistence.annotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }}
{{- end }}
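Taken together, these hunks replace the hard-coded `data` volume name with `primary.persistence.volumeName` everywhere the primary StatefulSet references it: the container mounts, the emptyDir/existingClaim fallbacks, and the generated PVC template. A sketch of the corresponding values override; the name is arbitrary, and keeping the default `data` is the safe choice for existing releases because the PVC template name feeds into the PVC names the StatefulSet expects:

```yaml
# Sketch: renaming the primary data volume via the new parameter.
primary:
  persistence:
    enabled: true
    volumeName: "pgdata"          # default is "data"
    mountPath: /bitnami/postgresql
    size: 8Gi
```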

View File

@ -413,13 +413,11 @@ spec:
- name: dshm
mountPath: /dev/shm
{{- end }}
{{- if .Values.readReplicas.persistence.enabled }}
- name: data
mountPath: {{ .Values.readReplicas.persistence.mountPath }}
{{- if .Values.readReplicas.persistence.subPath }}
subPath: {{ .Values.readReplicas.persistence.subPath }}
{{- end }}
{{- end }}
{{- if .Values.readReplicas.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}

View File

@ -105,7 +105,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/postgresql
tag: 16.2.0-debian-12-r10
tag: 16.2.0-debian-12-r12
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -738,6 +738,9 @@ primary:
## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC
##
enabled: true
## @param primary.persistence.volumeName Name to assign the volume
##
volumeName: "data"
## @param primary.persistence.existingClaim Name of an existing PVC to use
##
existingClaim: ""
@ -1392,7 +1395,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r17
tag: 12-debian-12-r18
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -1501,7 +1504,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.15.0-debian-12-r14
tag: 0.15.0-debian-12-r15
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -39,4 +39,4 @@ maintainers:
name: redis
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/redis
version: 19.0.1
version: 19.0.2

View File

@ -973,6 +973,15 @@ helm install my-release --set master.persistence.existingClaim=PVC_NAME oci://RE
| `kubectl.image.pullPolicy` | Kubectl image pull policy | `IfNotPresent` |
| `kubectl.image.pullSecrets` | Kubectl pull secrets | `[]` |
| `kubectl.command` | kubectl command to execute | `["/opt/bitnami/scripts/kubectl-scripts/update-master-label.sh"]` |
| `kubectl.containerSecurityContext.enabled` | Enabled kubectl containers' Security Context | `true` |
| `kubectl.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `kubectl.containerSecurityContext.runAsUser` | Set kubectl containers' Security Context runAsUser | `1001` |
| `kubectl.containerSecurityContext.runAsGroup` | Set kubectl containers' Security Context runAsGroup | `1001` |
| `kubectl.containerSecurityContext.runAsNonRoot` | Set kubectl containers' Security Context runAsNonRoot | `true` |
| `kubectl.containerSecurityContext.allowPrivilegeEscalation` | Set kubectl containers' Security Context allowPrivilegeEscalation | `false` |
| `kubectl.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context read-only root filesystem | `true` |
| `kubectl.containerSecurityContext.seccompProfile.type` | Set kubectl containers' Security Context seccompProfile | `RuntimeDefault` |
| `kubectl.containerSecurityContext.capabilities.drop` | Set kubectl containers' Security Context capabilities to drop | `["ALL"]` |
| `kubectl.resources.limits` | The resources limits for the kubectl containers | `{}` |
| `kubectl.resources.requests` | The requested resources for the kubectl containers | `{}` |
| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |

View File

@ -598,8 +598,9 @@ spec:
image: {{ template "redis.kubectl.image" . }}
imagePullPolicy: {{ .Values.kubectl.image.pullPolicy | quote }}
command: {{- toYaml .Values.kubectl.command | nindent 12 }}
securityContext:
runAsUser: 0
{{- if .Values.kubectl.containerSecurityContext.enabled }}
securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.kubectl.containerSecurityContext "context" $) | nindent 12 }}
{{- end }}
volumeMounts:
- name: kubectl-shared
mountPath: /etc/shared
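The fixed `runAsUser: 0` is gone; the kubectl container now takes the configurable `kubectl.containerSecurityContext` (defaults shown further down in this commit) and renders it through `common.compatibility.renderSecurityContext`. Based on the `adaptSecurityContext` description earlier in this commit, that helper is expected to drop the fixed user/group IDs on OpenShift when compatibility mode is active; a sketch of the relevant override:

```yaml
# Sketch: let OpenShift assign UIDs/GIDs instead of the chart's fixed 1001.
# "auto" applies the adaptation only when an OpenShift cluster is detected
# (per the option's description; the detection logic itself is an assumption here).
global:
  compatibility:
    openshift:
      adaptSecurityContext: auto
```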

View File

@ -2052,6 +2052,30 @@ kubectl:
## @param kubectl.command kubectl command to execute
##
command: ["/opt/bitnami/scripts/kubectl-scripts/update-master-label.sh"]
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param kubectl.containerSecurityContext.enabled Enabled kubectl containers' Security Context
## @param kubectl.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param kubectl.containerSecurityContext.runAsUser Set kubectl containers' Security Context runAsUser
## @param kubectl.containerSecurityContext.runAsGroup Set kubectl containers' Security Context runAsGroup
## @param kubectl.containerSecurityContext.runAsNonRoot Set kubectl containers' Security Context runAsNonRoot
## @param kubectl.containerSecurityContext.allowPrivilegeEscalation Set kubectl containers' Security Context allowPrivilegeEscalation
## @param kubectl.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem
## @param kubectl.containerSecurityContext.seccompProfile.type Set kubectl containers' Security Context seccompProfile
## @param kubectl.containerSecurityContext.capabilities.drop Set kubectl containers' Security Context capabilities to drop
##
containerSecurityContext:
enabled: true
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
capabilities:
drop: ["ALL"]
## Bitnami Kubectl resource requests and limits
## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
## @param kubectl.resources.limits The resources limits for the kubectl containers

View File

@ -1,6 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.19.0
digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc
generated: "2024-03-15T01:41:57.021991384Z"
version: 2.19.1
digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5
generated: "2024-03-27T14:58:35.744336265+01:00"

View File

@ -6,14 +6,14 @@ annotations:
category: ApplicationServer
images: |
- name: jmx-exporter
image: docker.io/bitnami/jmx-exporter:0.20.0-debian-12-r11
image: docker.io/bitnami/jmx-exporter:0.20.0-debian-12-r12
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r16
image: docker.io/bitnami/os-shell:12-debian-12-r17
- name: tomcat
image: docker.io/bitnami/tomcat:10.1.19-debian-12-r2
image: docker.io/bitnami/tomcat:10.1.20-debian-12-r0
licenses: Apache-2.0
apiVersion: v2
appVersion: 10.1.19
appVersion: 10.1.20
dependencies:
- name: common
repository: file://./charts/common
@ -38,4 +38,4 @@ maintainers:
name: tomcat
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/tomcat
version: 10.17.1
version: 11.0.0

View File

@ -45,250 +45,6 @@ These commands deploy Tomcat on the Kubernetes cluster in the default configurat
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Parameters
### Global parameters
| Name | Description | Value |
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` |
### Common parameters
| Name | Description | Value |
| ------------------- | -------------------------------------------------------------------------------------------- | --------------- |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
| `commonLabels` | Add labels to all the deployed resources | `{}` |
| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
### Tomcat parameters
| Name | Description | Value |
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `image.registry` | Tomcat image registry | `REGISTRY_NAME` |
| `image.repository` | Tomcat image repository | `REPOSITORY_NAME/tomcat` |
| `image.digest` | Tomcat image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Tomcat image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Specify if debug logs should be enabled | `false` |
| `automountServiceAccountToken` | Mount Service Account token in pod | `false` |
| `hostAliases` | Deployment pod host aliases | `[]` |
| `tomcatUsername` | Tomcat admin user | `user` |
| `tomcatPassword` | Tomcat admin password | `""` |
| `existingSecret` | Use existing secret for password details (`tomcatPassword` will be ignored and picked up from this secret). The secret has to contain the key `tomcat-password` | `""` |
| `tomcatAllowRemoteManagement` | Enable remote access to management interface | `0` |
| `catalinaOpts` | Java runtime option used by tomcat JVM | `""` |
| `command` | Override default container command (useful when using custom images) | `[]` |
| `args` | Override default container args (useful when using custom images) | `[]` |
| `extraEnvVars` | Extra environment variables to be set on Tomcat container | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables | `""` |
### Tomcat deployment parameters
| Name | Description | Value |
| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `replicaCount` | Specify number of Tomcat replicas | `1` |
| `deployment.type` | Use Deployment or StatefulSet | `deployment` |
| `updateStrategy.type` | StrategyType | `RollingUpdate` |
| `containerPorts.http` | HTTP port to expose at container level | `8080` |
| `containerExtraPorts` | Extra ports to expose at container level | `[]` |
| `podSecurityContext.enabled` | Enable Tomcat pods' Security Context | `true` |
| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `podSecurityContext.fsGroup` | Set Tomcat pod's Security Context fsGroup | `1001` |
| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `none` |
| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `livenessProbe.enabled` | Enable livenessProbe | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `startupProbe.enabled` | Enable startupProbe | `false` |
| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `3` |
| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` |
| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `customLivenessProbe` | Override default liveness probe | `{}` |
| `customReadinessProbe` | Override default readiness probe | `{}` |
| `customStartupProbe` | Override default startup probe | `{}` |
| `podLabels` | Extra labels for Tomcat pods | `{}` |
| `podAnnotations` | Annotations for Tomcat pods | `{}` |
| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` |
| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` |
| `schedulerName` | Alternative scheduler | `""` |
| `lifecycleHooks` | Override default etcd container hooks | `{}` |
| `podManagementPolicy` | podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode) | `""` |
| `tolerations` | Tolerations for pod assignment. Evaluated as a template. | `[]` |
| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `extraPodSpec` | Optionally specify extra PodSpec | `{}` |
| `extraVolumes` | Optionally specify extra list of additional volumes for Tomcat pods in Deployment | `[]` |
| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volume claim templates for Tomcat pods in StatefulSet | `[]` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Tomcat container(s) | `[]` |
| `initContainers` | Add init containers to the Tomcat pods. | `[]` |
| `sidecars` | Add sidecars to the Tomcat pods. | `[]` |
| `persistence.enabled` | Enable persistence | `true` |
| `persistence.storageClass` | PVC Storage Class for Tomcat volume | `""` |
| `persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `persistence.accessModes` | PVC Access Modes for Tomcat volume | `["ReadWriteOnce"]` |
| `persistence.size` | PVC Storage Request for Tomcat volume | `8Gi` |
| `persistence.existingClaim` | An Existing PVC name for Tomcat volume | `""` |
| `persistence.selectorLabels` | Selector labels to use in volume claim template in statefulset | `{}` |
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` |
| `serviceAccount.create` | Enable creation of ServiceAccount for Tomcat pod | `true` |
| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` |
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
### Traffic Exposure parameters
| Name | Description | Value |
| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `service.type` | Kubernetes Service type | `LoadBalancer` |
| `service.ports.http` | Service HTTP port | `80` |
| `service.nodePorts.http` | Kubernetes http node port | `""` |
| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `service.loadBalancerIP` | Port Use serviceLoadBalancerIP to request a specific static IP, otherwise leave blank | `""` |
| `service.clusterIP` | Service Cluster IP | `""` |
| `service.loadBalancerSourceRanges` | Service Load Balancer sources | `[]` |
| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `service.annotations` | Annotations for Tomcat service | `{}` |
| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `service.headless.annotations` | Annotations for the headless service. | `{}` |
| `ingress.enabled` | Enable ingress controller resource | `false` |
| `ingress.hostname` | Default host for the ingress resource | `tomcat.local` |
| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` |
| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.extraPaths` | Any additional arbitrary paths that may need to be added to the ingress under the main host. | `[]` |
| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` |
| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` |
| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
| `ingress.path` | Ingress path | `/` |
| `ingress.pathType` | Ingress path type | `ImplementationSpecific` |
### Volume Permissions parameters
| Name | Description | Value |
| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `none` |
| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
### Metrics parameters
| Name | Description | Value |
| --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` |
| `metrics.jmx.catalinaOpts` | custom option used to enabled JMX on tomcat jvm evaluated as template | `-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true` |
| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` |
| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` |
| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` |
| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `metrics.jmx.config` | Configuration file for JMX exporter | `""` |
| `metrics.jmx.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `metrics.jmx.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `metrics.jmx.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `metrics.jmx.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). | `none` |
| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `metrics.jmx.ports.metrics` | JMX Exporter container metrics ports | `5556` |
| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` |
| `metrics.podMonitor.podTargetLabels` | Used to keep given pod's labels in target | `[]` |
| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.podMonitor.namespace` | Optional namespace in which Prometheus is running | `""` |
| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` |
| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` |
| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` |
| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` |
| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` |
| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` |
| `metrics.prometheusRule.rules` | Create specified [Rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) | `[]` |
The above parameters map to the env variables defined in [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat). For more information please refer to the [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat) image documentation.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install my-release \
--set tomcatUsername=manager,tomcatPassword=password oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
The above command sets the Tomcat management username and password to `manager` and `password` respectively.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/tomcat/values.yaml)
## Configuration and installation details
### Resource requests and limits
@ -382,12 +138,264 @@ As an alternative, this chart supports using an init container to change the own
You can enable this init container by setting `volumePermissions.enabled` to `true`.
## Parameters
### Global parameters
| Name | Description | Value |
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` |
### Common parameters
| Name | Description | Value |
| ------------------- | -------------------------------------------------------------------------------------------- | --------------- |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
| `commonLabels` | Add labels to all the deployed resources | `{}` |
| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
### Tomcat parameters
| Name | Description | Value |
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `image.registry` | Tomcat image registry | `REGISTRY_NAME` |
| `image.repository` | Tomcat image repository | `REPOSITORY_NAME/tomcat` |
| `image.digest` | Tomcat image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Tomcat image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Specify if debug logs should be enabled | `false` |
| `automountServiceAccountToken` | Mount Service Account token in pod | `false` |
| `hostAliases` | Deployment pod host aliases | `[]` |
| `tomcatUsername` | Tomcat admin user | `user` |
| `tomcatPassword` | Tomcat admin password | `""` |
| `existingSecret` | Use existing secret for password details (`tomcatPassword` will be ignored and picked up from this secret). The secret has to contain the key `tomcat-password` | `""` |
| `tomcatAllowRemoteManagement` | Enable remote access to management interface | `0` |
| `catalinaOpts` | Java runtime option used by tomcat JVM | `""` |
| `command` | Override default container command (useful when using custom images) | `[]` |
| `args` | Override default container args (useful when using custom images) | `[]` |
| `extraEnvVars` | Extra environment variables to be set on Tomcat container | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables | `""` |
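As an illustration, the admin password can also come from a pre-existing Secret instead of `tomcatPassword`. The sketch below assumes a Secret named `tomcat-credentials` (the name is arbitrary) that exposes the required `tomcat-password` key:

```yaml
# Hypothetical Secret providing the key expected by the chart
apiVersion: v1
kind: Secret
metadata:
  name: tomcat-credentials
type: Opaque
stringData:
  tomcat-password: "change-me"  # the key must be named tomcat-password
```

and the corresponding chart value:

```yaml
existingSecret: "tomcat-credentials"  # tomcatPassword is ignored when this is set
```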
### Tomcat deployment parameters
| Name | Description | Value |
| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `replicaCount` | Specify number of Tomcat replicas | `1` |
| `deployment.type` | Use Deployment or StatefulSet | `deployment` |
| `updateStrategy.type` | StrategyType | `RollingUpdate` |
| `containerPorts.http` | HTTP port to expose at container level | `8080` |
| `containerExtraPorts` | Extra ports to expose at container level | `[]` |
| `podSecurityContext.enabled` | Enable Tomcat pods' Security Context | `true` |
| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
| `podSecurityContext.fsGroup` | Set Tomcat pod's Security Context fsGroup | `1001` |
| `containerSecurityContext.enabled` | Enable containers' Security Context | `true` |
| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `livenessProbe.enabled` | Enable livenessProbe | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `startupProbe.enabled` | Enable startupProbe | `false` |
| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` |
| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `3` |
| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` |
| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `customLivenessProbe` | Override default liveness probe | `{}` |
| `customReadinessProbe` | Override default readiness probe | `{}` |
| `customStartupProbe` | Override default startup probe | `{}` |
| `podLabels` | Extra labels for Tomcat pods | `{}` |
| `podAnnotations` | Annotations for Tomcat pods | `{}` |
| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` |
| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` |
| `schedulerName` | Alternative scheduler | `""` |
| `lifecycleHooks` | Override default Tomcat container hooks | `{}` |
| `podManagementPolicy` | podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode) | `""` |
| `tolerations` | Tolerations for pod assignment. Evaluated as a template. | `[]` |
| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `extraPodSpec` | Optionally specify extra PodSpec | `{}` |
| `extraVolumes` | Optionally specify extra list of additional volumes for Tomcat pods in Deployment | `[]` |
| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volume claim templates for Tomcat pods in StatefulSet | `[]` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Tomcat container(s) | `[]` |
| `initContainers` | Add init containers to the Tomcat pods. | `[]` |
| `sidecars` | Add sidecars to the Tomcat pods. | `[]` |
| `persistence.enabled` | Enable persistence | `true` |
| `persistence.storageClass` | PVC Storage Class for Tomcat volume | `""` |
| `persistence.annotations` | Persistent Volume Claim annotations | `{}` |
| `persistence.accessModes` | PVC Access Modes for Tomcat volume | `["ReadWriteOnce"]` |
| `persistence.size` | PVC Storage Request for Tomcat volume | `8Gi` |
| `persistence.existingClaim` | An Existing PVC name for Tomcat volume | `""` |
| `persistence.selectorLabels` | Selector labels to use in volume claim template in statefulset | `{}` |
| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` |
| `networkPolicy.allowExternal` | Don't require server label for connections | `true` |
| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` |
| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` |
| `serviceAccount.create` | Enable creation of ServiceAccount for Tomcat pod | `true` |
| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` |
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
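Since `resourcesPreset` values such as `micro` are only intended for testing, production deployments would typically set explicit requests and limits instead, for example (sizes are placeholders):

```yaml
# When resources is set, resourcesPreset is ignored
resources:
  requests:
    cpu: 500m
    memory: 512Mi
  limits:
    cpu: "1"
    memory: 1Gi
```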
### Traffic Exposure parameters
| Name | Description | Value |
| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `service.type` | Kubernetes Service type | `LoadBalancer` |
| `service.ports.http` | Service HTTP port | `80` |
| `service.nodePorts.http` | Kubernetes http node port | `""` |
| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `service.loadBalancerIP` | Use loadBalancerIP to request a specific static IP, otherwise leave blank | `""` |
| `service.clusterIP` | Service Cluster IP | `""` |
| `service.loadBalancerSourceRanges` | Service Load Balancer sources | `[]` |
| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `service.annotations` | Annotations for Tomcat service | `{}` |
| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `service.headless.annotations` | Annotations for the headless service. | `{}` |
| `ingress.enabled` | Enable ingress controller resource | `false` |
| `ingress.hostname` | Default host for the ingress resource | `tomcat.local` |
| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` |
| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.extraPaths` | Any additional arbitrary paths that may need to be added to the ingress under the main host. | `[]` |
| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` |
| `ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` |
| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` |
| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` |
| `ingress.path` | Ingress path | `/` |
| `ingress.pathType` | Ingress path type | `ImplementationSpecific` |
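For instance, to expose Tomcat through an Ingress rather than a LoadBalancer Service, a values override along these lines could be used (hostname and ingress class are placeholders, and an ingress controller is assumed to be installed):

```yaml
service:
  type: ClusterIP
ingress:
  enabled: true
  ingressClassName: "nginx"   # assumes an NGINX ingress controller
  hostname: tomcat.example.com
  tls: true
  selfSigned: true            # or provide your own certificates via ingress.secrets
```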
### Volume Permissions parameters
| Name | Description | Value |
| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `none` |
| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
### Metrics parameters
| Name | Description | Value |
| --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` |
| `metrics.jmx.catalinaOpts` | Custom options used to enable JMX on the Tomcat JVM, evaluated as a template | `-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true` |
| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` |
| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` |
| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` |
| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `metrics.jmx.config` | Configuration file for JMX exporter | `""` |
| `metrics.jmx.containerSecurityContext.enabled` | Enable containers' Security Context | `true` |
| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` |
| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
| `metrics.jmx.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` |
| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `metrics.jmx.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
| `metrics.jmx.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
| `metrics.jmx.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). | `none` |
| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
| `metrics.jmx.ports.metrics` | JMX Exporter container metrics ports | `5556` |
| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` |
| `metrics.podMonitor.podTargetLabels` | Used to keep given pod's labels in target | `[]` |
| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` |
| `metrics.podMonitor.namespace` | Optional namespace in which Prometheus is running | `""` |
| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` |
| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` |
| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` |
| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` |
| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` |
| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` |
| `metrics.prometheusRule.rules` | Create specified [Rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) | `[]` |
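By way of example, JMX metrics with a PodMonitor for the Prometheus Operator could be enabled roughly as follows (a sketch that assumes the Prometheus Operator CRDs are installed and that your Prometheus discovers PodMonitors via a `release` label):

```yaml
metrics:
  jmx:
    enabled: true
  podMonitor:
    enabled: true
    namespace: monitoring              # namespace where Prometheus is running
    additionalLabels:
      release: kube-prometheus-stack   # match your Prometheus selector
```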
The above parameters map to the env variables defined in [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat). For more information please refer to the [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat) image documentation.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install my-release \
--set tomcatUsername=manager,tomcatPassword=password oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
The above command sets the Tomcat management username and password to `manager` and `password` respectively.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat
```
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/tomcat/values.yaml)
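For illustration, a minimal `values.yaml` for such an install might look like the following (all values are placeholders):

```yaml
tomcatUsername: manager
tomcatPassword: "change-me"
service:
  type: ClusterIP
persistence:
  size: 8Gi
```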
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
### To 11.0.0
This major bump changes the following security defaults:
- `runAsGroup` is changed from `0` to `1001`
- `readOnlyRootFilesystem` is set to `true`
- `resourcesPreset` is changed from `none` to the minimum size that works in our test suites (NOTE: `resourcesPreset` is not meant for production usage; for production, set `resources` adapted to your use case).
- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`.
- The `networkPolicy` section has been normalized amongst all Bitnami charts. Compared to the previous approach, the values section has been simplified (check the Parameters section) and it is now set to `enabled=true` by default. Egress traffic is allowed by default, and ingress traffic is allowed from all pods, but only to the ports set in `containerPorts`.
This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones.
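For example, the previous defaults can be approximately restored with an override like this (apply only the parts you actually need):

```yaml
# Revert the 11.0.0 hardening to the pre-11.0.0 defaults (use with care)
containerSecurityContext:
  runAsGroup: 0
  readOnlyRootFilesystem: false
resourcesPreset: "none"
global:
  compatibility:
    openshift:
      adaptSecurityContext: "disabled"
networkPolicy:
  enabled: false
```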
### To 10.0.0
Some of the chart values were changed to adapt to the latest Bitnami standards. More specifically:

View File

@ -2,7 +2,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.19.0
appVersion: 2.19.1
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
@ -20,4 +20,4 @@ name: common
sources:
- https://github.com/bitnami/charts
type: library
version: 2.19.0
version: 2.19.1

View File

@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production
{{ include "common.resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "common.resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi")
)
"xlarge" (dict
"requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi")
)
"2xlarge" (dict
"requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi")
"requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi")
)
}}
@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -150,6 +150,21 @@ containers:
volumeMounts:
- name: data
mountPath: /bitnami/tomcat
- name: empty-dir
mountPath: /opt/bitnami/tomcat/temp
subPath: app-tmp-dir
- name: empty-dir
mountPath: /opt/bitnami/tomcat/conf
subPath: app-conf-dir
- name: empty-dir
mountPath: /opt/bitnami/tomcat/logs
subPath: app-logs-dir
- name: empty-dir
mountPath: /opt/bitnami/tomcat/work
subPath: app-work-dir
- name: empty-dir
mountPath: /tmp
subPath: tmp-dir
{{- if .Values.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 6 }}
{{- end }}
@ -182,11 +197,16 @@ containers:
volumeMounts:
- name: jmx-config
mountPath: /etc/jmx-tomcat
- name: empty-dir
mountPath: /tmp
subPath: tmp-dir
{{- end }}
{{- if .Values.sidecars }}
{{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 2 }}
{{- end }}
volumes:
- name: empty-dir
emptyDir: {}
{{- if (eq .Values.deployment.type "deployment") }}
{{- if and .Values.persistence.enabled }}
- name: data

View File

@ -8,34 +8,85 @@ kind: NetworkPolicy
apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
metadata:
name: {{ template "common.names.fullname" . }}
namespace: {{ include "common.names.namespace" . | quote }}
labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
namespace: {{ .Release.Namespace }}
spec:
{{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }}
podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
policyTypes:
- Ingress
- Egress
{{- if .Values.networkPolicy.allowExternalEgress }}
egress:
- {}
{{- else }}
egress:
# Allow dns resolution
- ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
# Allow outbound connections to MariaDB
- ports:
- port: {{ include "wordpress.databasePort" . }}
{{- if .Values.mariadb.enabled }}
to:
- podSelector:
matchLabels:
app.kubernetes.io/name: mariadb
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- if .Values.wordpressConfigureCache }}
# Allow outbound connections to Memcached
- ports:
- port: {{ include "wordpress.cachePort" . }}
{{- if .Values.memcached.enabled }}
to:
- podSelector:
matchLabels:
app.kubernetes.io/name: memcached
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- end }}
{{- if .Values.networkPolicy.extraEgress }}
{{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
ingress:
# Allow inbound connections
- ports: {{- include "tomcat.ports" . | nindent 8 }}
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }}
- podSelector:
matchLabels:
{{ template "common.names.fullname" . }}-client: "true"
{{- if .Values.networkPolicy.explicitNamespacesSelector }}
namespaceSelector:
{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }}
{{- if .Values.networkPolicy.ingressNSMatchLabels }}
- namespaceSelector:
matchLabels:
{{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }}
{{ $key | quote }}: {{ $value | quote }}
{{- end }}
{{- if .Values.networkPolicy.ingressNSPodMatchLabels }}
podSelector:
matchLabels:
{{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }}
{{ $key | quote }}: {{ $value | quote }}
{{- end }}
{{- end }}
# Allow communication between Tomcat's POD
- podSelector:
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }}
{{- end }}
{{- end }}
{{- if .Values.metrics.jmx.enabled }}
# Allow prometheus scrapes
- ports:
- port: {{ .Values.metrics.jmx.ports.metrics }}
{{- end }}
{{- if .Values.networkPolicy.extraIngress }}
{{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -68,7 +68,7 @@ extraDeploy: []
image:
registry: docker.io
repository: bitnami/tomcat
tag: 10.1.19-debian-12-r2
tag: 10.1.20-debian-12-r0
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -180,6 +180,7 @@ podSecurityContext:
## @param containerSecurityContext.enabled Enabled containers' Security Context
## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param containerSecurityContext.privileged Set container's Security Context privileged
## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
@ -189,11 +190,12 @@ podSecurityContext:
##
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
@ -208,7 +210,7 @@ containerSecurityContext:
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
##
resourcesPreset: "none"
resourcesPreset: "micro"
## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
## Example:
## resources:
@ -406,31 +408,61 @@ persistence:
## Applicable when deployment.type is statefulset
##
selectorLabels: {}
## Network Policy configuration
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
##
enabled: false
## @param networkPolicy.allowExternal Don't require client label for connections
enabled: true
## @param networkPolicy.allowExternal Don't require server label for connections
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to every tomcat port defined on containerPort and containerExtraPorts.
## When true, tomcat will accept connections from any source
## server label will have network access to the ports server is listening
## on. When true, server will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
## and that match other criteria, the ones that have the good label, can reach the tomcat.
## But sometimes, we want the tomcat to be accessible to clients from other namespaces, in this case, we can use this
## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
##
## Example:
## explicitNamespacesSelector:
## matchLabels:
## role: frontend
## matchExpressions:
## - {key: role, operator: In, values: [frontend]}
allowExternalEgress: true
## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
## e.g:
## extraIngress:
## - ports:
## - port: 1234
## from:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
extraIngress: []
## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
## e.g:
## extraEgress:
## - ports:
## - port: 1234
## to:
## - podSelector:
## - matchLabels:
## - role: frontend
## - podSelector:
## - matchExpressions:
## - key: role
## operator: In
## values:
## - frontend
##
explicitNamespacesSelector: {}
extraEgress: []
## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
## Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
@ -636,7 +668,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r16
tag: 12-debian-12-r17
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -694,7 +726,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/jmx-exporter
tag: 0.20.0-debian-12-r11
tag: 0.20.0-debian-12-r12
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -723,6 +755,7 @@ metrics:
## @param metrics.jmx.containerSecurityContext.enabled Enabled containers' Security Context
## @param metrics.jmx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
## @param metrics.jmx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser
## @param metrics.jmx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup
## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot
## @param metrics.jmx.containerSecurityContext.privileged Set container's Security Context privileged
## @param metrics.jmx.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem
@ -731,11 +764,12 @@ metrics:
## @param metrics.jmx.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
containerSecurityContext:
enabled: true
seLinuxOptions: null
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
privileged: false
readOnlyRootFilesystem: false
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]

View File

@ -1,12 +1,12 @@
dependencies:
- name: memcached
repository: oci://registry-1.docker.io/bitnamicharts
version: 7.0.2
version: 7.0.3
- name: mariadb
repository: oci://registry-1.docker.io/bitnamicharts
version: 17.0.1
version: 18.0.0
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.19.0
digest: sha256:3b4e997af36fdaa0116bd43fdbe2bb7575f280e86a1ede66430a47115b7f89ba
generated: "2024-03-20T16:16:30.974197+01:00"
version: 2.19.1
digest: sha256:9c6b4b3220792623bf8c5886c162486d5e1d95d2de3f139d8763dd76076291d7
generated: "2024-04-02T16:36:02.420521+02:00"

View File

@ -6,14 +6,14 @@ annotations:
category: CMS
images: |
- name: apache-exporter
image: docker.io/bitnami/apache-exporter:1.0.7-debian-12-r0
image: docker.io/bitnami/apache-exporter:1.0.7-debian-12-r1
- name: os-shell
image: docker.io/bitnami/os-shell:12-debian-12-r17
image: docker.io/bitnami/os-shell:12-debian-12-r18
- name: wordpress
image: docker.io/bitnami/wordpress:6.4.3-debian-12-r28
image: docker.io/bitnami/wordpress:6.5.0-debian-12-r0
licenses: Apache-2.0
apiVersion: v2
appVersion: 6.4.3
appVersion: 6.5.0
dependencies:
- condition: memcached.enabled
name: memcached
@ -22,7 +22,7 @@ dependencies:
- condition: mariadb.enabled
name: mariadb
repository: file://./charts/mariadb
version: 17.x.x
version: 18.x.x
- name: common
repository: file://./charts/common
tags:
@ -47,4 +47,4 @@ maintainers:
name: wordpress
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/wordpress
version: 21.0.6
version: 22.1.0

Some files were not shown because too many files have changed in this diff