diff --git a/assets/argo/argo-cd-6.7.3.tgz b/assets/argo/argo-cd-6.7.3.tgz
index 80414f826..455490dff 100644
Binary files a/assets/argo/argo-cd-6.7.3.tgz and b/assets/argo/argo-cd-6.7.3.tgz differ
diff --git a/assets/argo/argo-cd-6.7.8.tgz b/assets/argo/argo-cd-6.7.8.tgz
new file mode 100644
index 000000000..fdc0f57a2
Binary files /dev/null and b/assets/argo/argo-cd-6.7.8.tgz differ
diff --git a/assets/bitnami/cassandra-11.0.1.tgz b/assets/bitnami/cassandra-11.0.1.tgz
new file mode 100644
index 000000000..e6ad8003e
Binary files /dev/null and b/assets/bitnami/cassandra-11.0.1.tgz differ
diff --git a/assets/bitnami/kafka-28.0.1.tgz b/assets/bitnami/kafka-28.0.1.tgz
new file mode 100644
index 000000000..8504fb5d9
Binary files /dev/null and b/assets/bitnami/kafka-28.0.1.tgz differ
diff --git a/assets/bitnami/mariadb-18.0.1.tgz b/assets/bitnami/mariadb-18.0.1.tgz
new file mode 100644
index 000000000..bc997bd62
Binary files /dev/null and b/assets/bitnami/mariadb-18.0.1.tgz differ
diff --git a/assets/bitnami/mysql-10.1.1.tgz b/assets/bitnami/mysql-10.1.1.tgz
new file mode 100644
index 000000000..c92502d2d
Binary files /dev/null and b/assets/bitnami/mysql-10.1.1.tgz differ
diff --git a/assets/bitnami/postgresql-15.2.2.tgz b/assets/bitnami/postgresql-15.2.2.tgz
new file mode 100644
index 000000000..cd5d70db4
Binary files /dev/null and b/assets/bitnami/postgresql-15.2.2.tgz differ
diff --git a/assets/bitnami/redis-19.0.2.tgz b/assets/bitnami/redis-19.0.2.tgz
new file mode 100644
index 000000000..7568dfcce
Binary files /dev/null and b/assets/bitnami/redis-19.0.2.tgz differ
diff --git a/assets/bitnami/tomcat-11.0.0.tgz b/assets/bitnami/tomcat-11.0.0.tgz
new file mode 100644
index 000000000..3e5580b5b
Binary files /dev/null and b/assets/bitnami/tomcat-11.0.0.tgz differ
diff --git a/assets/bitnami/wordpress-22.1.0.tgz b/assets/bitnami/wordpress-22.1.0.tgz
new file mode 100644
index 000000000..293bae9a8
Binary files /dev/null and b/assets/bitnami/wordpress-22.1.0.tgz differ
diff --git a/assets/bitnami/zookeeper-13.1.0.tgz b/assets/bitnami/zookeeper-13.1.0.tgz
new file mode 100644
index 000000000..65eed0326
Binary files /dev/null and b/assets/bitnami/zookeeper-13.1.0.tgz differ
diff --git a/assets/cockroach-labs/cockroachdb-12.0.3.tgz b/assets/cockroach-labs/cockroachdb-12.0.3.tgz
new file mode 100644
index 000000000..19a22604a
Binary files /dev/null and b/assets/cockroach-labs/cockroachdb-12.0.3.tgz differ
diff --git a/assets/datadog/datadog-3.59.4.tgz b/assets/datadog/datadog-3.59.4.tgz
new file mode 100644
index 000000000..d404e56e4
Binary files /dev/null and b/assets/datadog/datadog-3.59.4.tgz differ
diff --git a/assets/datadog/datadog-operator-1.6.0.tgz b/assets/datadog/datadog-operator-1.6.0.tgz
new file mode 100644
index 000000000..5c4300a85
Binary files /dev/null and b/assets/datadog/datadog-operator-1.6.0.tgz differ
diff --git a/assets/dynatrace/dynatrace-operator-1.0.0.tgz b/assets/dynatrace/dynatrace-operator-1.0.0.tgz
new file mode 100644
index 000000000..da7ff64af
Binary files /dev/null and b/assets/dynatrace/dynatrace-operator-1.0.0.tgz differ
diff --git a/assets/external-secrets/external-secrets-0.9.14.tgz b/assets/external-secrets/external-secrets-0.9.14.tgz
new file mode 100644
index 000000000..68a2220b5
Binary files /dev/null and b/assets/external-secrets/external-secrets-0.9.14.tgz differ
diff --git a/assets/f5/nginx-ingress-1.2.0.tgz b/assets/f5/nginx-ingress-1.2.0.tgz
new file mode 100644
index 000000000..61b97ef54
Binary files /dev/null and b/assets/f5/nginx-ingress-1.2.0.tgz differ
diff --git a/assets/fairwinds/polaris-5.17.1.tgz b/assets/fairwinds/polaris-5.17.1.tgz
new file mode 100644
index 000000000..43ea4898a
Binary files /dev/null and b/assets/fairwinds/polaris-5.17.1.tgz differ
diff --git a/assets/hashicorp/consul-1.4.1.tgz b/assets/hashicorp/consul-1.4.1.tgz
new file mode 100644
index 000000000..3ec8a383d
Binary files /dev/null and b/assets/hashicorp/consul-1.4.1.tgz differ
diff --git a/assets/jenkins/jenkins-5.1.5.tgz b/assets/jenkins/jenkins-5.1.5.tgz
new file mode 100644
index 000000000..603ad8318
Binary files /dev/null and b/assets/jenkins/jenkins-5.1.5.tgz differ
diff --git a/assets/jfrog/artifactory-ha-107.77.8.tgz b/assets/jfrog/artifactory-ha-107.77.8.tgz
new file mode 100644
index 000000000..022463e16
Binary files /dev/null and b/assets/jfrog/artifactory-ha-107.77.8.tgz differ
diff --git a/assets/jfrog/artifactory-jcr-107.77.8.tgz b/assets/jfrog/artifactory-jcr-107.77.8.tgz
new file mode 100644
index 000000000..38a1452de
Binary files /dev/null and b/assets/jfrog/artifactory-jcr-107.77.8.tgz differ
diff --git a/assets/kasten/k10-6.5.1001.tgz b/assets/kasten/k10-6.5.1001.tgz
new file mode 100644
index 000000000..cf57b36fe
Binary files /dev/null and b/assets/kasten/k10-6.5.1001.tgz differ
diff --git a/assets/kubecost/cost-analyzer-2.1.1.tgz b/assets/kubecost/cost-analyzer-2.1.1.tgz
index 095159d42..687774689 100644
Binary files a/assets/kubecost/cost-analyzer-2.1.1.tgz and b/assets/kubecost/cost-analyzer-2.1.1.tgz differ
diff --git a/assets/kubecost/cost-analyzer-2.2.0.tgz b/assets/kubecost/cost-analyzer-2.2.0.tgz
new file mode 100644
index 000000000..771483e5b
Binary files /dev/null and b/assets/kubecost/cost-analyzer-2.2.0.tgz differ
diff --git a/assets/kuma/kuma-2.6.4.tgz b/assets/kuma/kuma-2.6.4.tgz
new file mode 100644
index 000000000..8ab314d89
Binary files /dev/null and b/assets/kuma/kuma-2.6.4.tgz differ
diff --git a/assets/linkerd/linkerd-control-plane-2024.3.4.tgz b/assets/linkerd/linkerd-control-plane-2024.3.4.tgz
index b327dfe74..8ad52e495 100644
Binary files a/assets/linkerd/linkerd-control-plane-2024.3.4.tgz and b/assets/linkerd/linkerd-control-plane-2024.3.4.tgz differ
diff --git a/assets/linkerd/linkerd-control-plane-2024.3.5.tgz b/assets/linkerd/linkerd-control-plane-2024.3.5.tgz
new file mode 100644
index 000000000..7f0fa713c
Binary files /dev/null and b/assets/linkerd/linkerd-control-plane-2024.3.5.tgz differ
diff --git a/assets/linkerd/linkerd-crds-2024.3.5.tgz b/assets/linkerd/linkerd-crds-2024.3.5.tgz
new file mode 100644
index 000000000..90ab33802
Binary files /dev/null and b/assets/linkerd/linkerd-crds-2024.3.5.tgz differ
diff --git a/assets/metallb/metallb-0.14.4.tgz b/assets/metallb/metallb-0.14.4.tgz
new file mode 100644
index 000000000..7fc25ea14
Binary files /dev/null and b/assets/metallb/metallb-0.14.4.tgz differ
diff --git a/assets/new-relic/nri-bundle-5.0.72.tgz b/assets/new-relic/nri-bundle-5.0.72.tgz
new file mode 100644
index 000000000..45336caab
Binary files /dev/null and b/assets/new-relic/nri-bundle-5.0.72.tgz differ
diff --git a/assets/redpanda/redpanda-5.7.37.tgz b/assets/redpanda/redpanda-5.7.37.tgz
new file mode 100644
index 000000000..b45a073c6
Binary files /dev/null and b/assets/redpanda/redpanda-5.7.37.tgz differ
diff --git a/assets/speedscale/speedscale-operator-2.1.15.tgz b/assets/speedscale/speedscale-operator-2.1.15.tgz
new file mode 100644
index 000000000..e1be813db
Binary files /dev/null and b/assets/speedscale/speedscale-operator-2.1.15.tgz differ
diff --git
a/assets/stackstate/stackstate-k8s-agent-1.0.78.tgz b/assets/stackstate/stackstate-k8s-agent-1.0.78.tgz new file mode 100644 index 000000000..4838cbace Binary files /dev/null and b/assets/stackstate/stackstate-k8s-agent-1.0.78.tgz differ diff --git a/assets/yugabyte/yugabyte-2.18.7.tgz b/assets/yugabyte/yugabyte-2.18.7.tgz new file mode 100644 index 000000000..97a4c0e51 Binary files /dev/null and b/assets/yugabyte/yugabyte-2.18.7.tgz differ diff --git a/assets/yugabyte/yugaware-2.18.7.tgz b/assets/yugabyte/yugaware-2.18.7.tgz new file mode 100644 index 000000000..0e7559d66 Binary files /dev/null and b/assets/yugabyte/yugaware-2.18.7.tgz differ diff --git a/charts/argo/argo-cd/Chart.yaml b/charts/argo/argo-cd/Chart.yaml index ab7b239f2..3a1970521 100644 --- a/charts/argo/argo-cd/Chart.yaml +++ b/charts/argo/argo-cd/Chart.yaml @@ -1,7 +1,7 @@ annotations: artifacthub.io/changes: | - - kind: changed - description: Bump argo-cd to v2.10.4 + - kind: added + description: Add sizeLimit params on EmptyDir Volume artifacthub.io/signKey: | fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 url: https://argoproj.github.io/argo-helm/pgp_keys.asc @@ -11,7 +11,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.23.0-0' catalog.cattle.io/release-name: argo-cd apiVersion: v2 -appVersion: v2.10.4 +appVersion: v2.10.5 dependencies: - condition: redis-ha.enabled name: redis-ha @@ -33,4 +33,4 @@ name: argo-cd sources: - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd - https://github.com/argoproj/argo-cd -version: 6.7.3 +version: 6.7.8 diff --git a/charts/argo/argo-cd/README.md b/charts/argo/argo-cd/README.md index d43a0fdab..244c2641e 100644 --- a/charts/argo/argo-cd/README.md +++ b/charts/argo/argo-cd/README.md @@ -733,6 +733,7 @@ NAME: my-release | controller.dnsConfig | object | `{}` | [DNS configuration] | | controller.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for application controller pods | | controller.dynamicClusterDistribution | bool | `false` | Enable dynamic cluster distribution (alpha) Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution | +| controller.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for application controller | | controller.env | list | `[]` | Environment variables to pass to application controller | | controller.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to application controller | | controller.extraArgs | list | `[]` | Additional command line arguments to pass to application controller | @@ -827,6 +828,7 @@ NAME: my-release | repoServer.deploymentStrategy | object | `{}` | Deployment strategy to be added to the repo server Deployment | | repoServer.dnsConfig | object | `{}` | [DNS configuration] | | repoServer.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Repo server pods | +| repoServer.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. 
no size limit) | EmptyDir size limit for repo server | | repoServer.env | list | `[]` | Environment variables to pass to repo server | | repoServer.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to repo server | | repoServer.existingVolumes | object | `{}` | Volumes to be used in replacement of emptydir on default volumes | @@ -928,6 +930,8 @@ NAME: my-release | server.certificateSecret.enabled | bool | `false` | Create argocd-server-tls secret | | server.certificateSecret.key | string | `""` | Private Key of the certificate | | server.certificateSecret.labels | object | `{}` | Labels to be added to argocd-server-tls secret | +| server.clusterRoleRules.enabled | bool | `false` | Enable custom rules for the server's ClusterRole resource | +| server.clusterRoleRules.rules | list | `[]` | List of custom rules for the server's ClusterRole resource | | server.containerPorts.metrics | int | `8083` | Metrics container port | | server.containerPorts.server | int | `8080` | Server container port | | server.containerSecurityContext | object | See [values.yaml] | Server container-level security context | @@ -935,6 +939,7 @@ NAME: my-release | server.deploymentStrategy | object | `{}` | Deployment strategy to be added to the server Deployment | | server.dnsConfig | object | `{}` | [DNS configuration] | | server.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Server pods | +| server.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for the Argo CD server | | server.env | list | `[]` | Environment variables to pass to Argo CD server | | server.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to Argo CD server | | server.extensions.containerSecurityContext | object | See [values.yaml] | Server UI extensions container-level security context | @@ -1073,6 +1078,7 @@ NAME: my-release | dex.deploymentStrategy | object | `{}` | Deployment strategy to be added to the Dex server Deployment | | dex.dnsConfig | object | `{}` | [DNS configuration] | | dex.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Dex server pods | +| dex.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. no size limit) | EmptyDir size limit for Dex server | | dex.enabled | bool | `true` | Enable dex | | dex.env | list | `[]` | Environment variables to pass to the Dex server | | dex.envFrom | list | `[]` (See [values.yaml]) | envFrom to pass to the Dex server | @@ -1325,6 +1331,7 @@ If you want to use an existing Redis (eg. a managed service from a cloud provide | applicationSet.deploymentStrategy | object | `{}` | Deployment strategy to be added to the ApplicationSet controller Deployment | | applicationSet.dnsConfig | object | `{}` | [DNS configuration] | | applicationSet.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for ApplicationSet controller pods | +| applicationSet.emptyDir.sizeLimit | string | `""` (defaults not set if not specified i.e. 
no size limit) | EmptyDir size limit for applicationSet controller | | applicationSet.enabled | bool | `true` | Enable ApplicationSet controller | | applicationSet.extraArgs | list | `[]` | ApplicationSet controller command line flags | | applicationSet.extraContainers | list | `[]` | Additional containers to be added to the ApplicationSet controller pod | diff --git a/charts/argo/argo-cd/templates/argocd-application-controller/deployment.yaml b/charts/argo/argo-cd/templates/argocd-application-controller/deployment.yaml index e0c121359..232ce2679 100644 --- a/charts/argo/argo-cd/templates/argocd-application-controller/deployment.yaml +++ b/charts/argo/argo-cd/templates/argocd-application-controller/deployment.yaml @@ -334,7 +334,13 @@ spec: {{- toYaml . | nindent 6 }} {{- end }} - name: argocd-home + {{- if .Values.controller.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.controller.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} + - name: argocd-repo-server-tls secret: secretName: argocd-repo-server-tls diff --git a/charts/argo/argo-cd/templates/argocd-application-controller/statefulset.yaml b/charts/argo/argo-cd/templates/argocd-application-controller/statefulset.yaml index 3b72d19f5..80535e675 100644 --- a/charts/argo/argo-cd/templates/argocd-application-controller/statefulset.yaml +++ b/charts/argo/argo-cd/templates/argocd-application-controller/statefulset.yaml @@ -333,7 +333,12 @@ spec: {{- toYaml . | nindent 6 }} {{- end }} - name: argocd-home + {{- if .Values.controller.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.controller.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} - name: argocd-repo-server-tls secret: secretName: argocd-repo-server-tls diff --git a/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml b/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml index 96fc38044..795f18d00 100644 --- a/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml +++ b/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml @@ -302,9 +302,19 @@ spec: configMap: name: argocd-gpg-keys-cm - name: gpg-keyring + {{- if .Values.applicationSet.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.applicationSet.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} - name: tmp + {{- if .Values.applicationSet.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.applicationSet.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} - name: argocd-repo-server-tls secret: secretName: argocd-repo-server-tls diff --git a/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml b/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml index edb957f32..eba5973f5 100644 --- a/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml +++ b/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml @@ -28,6 +28,9 @@ rules: verbs: - list - watch + {{- if (index .Values.configs.params "application.namespaces") }} + - create + {{- end }} {{- if .Values.notifications.cm.create }} - apiGroups: - "" diff --git a/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml b/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml index d27c15d91..590e53871 100644 --- a/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml +++ b/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml @@ -394,26 +394,46 @@ spec: {{- if .Values.repoServer.existingVolumes.helmWorkingDir -}} {{ toYaml 
.Values.repoServer.existingVolumes.helmWorkingDir | nindent 8 }} {{- else }} + {{- if .Values.repoServer.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} {{- end }} {{- end }} - name: plugins {{- if .Values.repoServer.existingVolumes.plugins -}} {{ toYaml .Values.repoServer.existingVolumes.plugins | nindent 8 }} {{- else }} + {{- if .Values.repoServer.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} {{- end }} - name: var-files {{- if .Values.repoServer.existingVolumes.varFiles -}} {{ toYaml .Values.repoServer.existingVolumes.varFiles | nindent 8 }} {{- else }} + {{- if .Values.repoServer.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} {{- end }} - name: tmp {{- if .Values.repoServer.existingVolumes.tmp -}} {{ toYaml .Values.repoServer.existingVolumes.tmp | nindent 8 }} {{- else }} + {{- if .Values.repoServer.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} {{- end }} - name: ssh-known-hosts configMap: @@ -428,7 +448,12 @@ spec: {{- if .Values.repoServer.existingVolumes.gpgKeyring -}} {{ toYaml .Values.repoServer.existingVolumes.gpgKeyring | nindent 8 }} {{- else }} + {{- if .Values.repoServer.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.repoServer.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} {{- end }} - name: argocd-repo-server-tls secret: diff --git a/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml b/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml index f4877980e..c1439b0c9 100644 --- a/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml +++ b/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml @@ -6,6 +6,9 @@ metadata: labels: {{- include "argo-cd.labels" (dict "context" . 
"component" .Values.server.name "name" .Values.server.name) | nindent 4 }} rules: + {{- if .Values.server.clusterRoleRules.enabled }} + {{- toYaml .Values.server.clusterRoleRules.rules | nindent 2 }} + {{- else }} - apiGroups: - '*' resources: @@ -48,6 +51,17 @@ rules: - list - update - watch + {{- if (index .Values.configs.params "application.namespaces") }} + - apiGroups: + - "argoproj.io" + resources: + - "applications" + verbs: + - create + - delete + - update + - patch + {{- end }} - apiGroups: - batch resources: @@ -62,4 +76,5 @@ rules: verbs: {{/* supports triggering workflows from UI */}} - create + {{- end }} {{- end }} diff --git a/charts/argo/argo-cd/templates/argocd-server/deployment.yaml b/charts/argo/argo-cd/templates/argocd-server/deployment.yaml index 6d614b0f5..faaf1c268 100644 --- a/charts/argo/argo-cd/templates/argocd-server/deployment.yaml +++ b/charts/argo/argo-cd/templates/argocd-server/deployment.yaml @@ -442,12 +442,27 @@ spec: {{- end }} {{- if .Values.server.extensions.enabled }} - name: extensions + {{- if .Values.server.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} {{- end }} - name: plugins-home + {{- if .Values.server.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} - name: tmp + {{- if .Values.server.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} - name: ssh-known-hosts configMap: name: argocd-ssh-known-hosts-cm diff --git a/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml b/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml index 02623f6c3..323beb27f 100644 --- a/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml +++ b/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml @@ -2381,8 +2381,6 @@ spec: - metadata - spec type: object - required: - - elements type: object matrix: properties: @@ -4725,8 +4723,6 @@ spec: - metadata - spec type: object - required: - - elements type: object matrix: x-kubernetes-preserve-unknown-fields: true @@ -9752,8 +9748,6 @@ spec: - metadata - spec type: object - required: - - elements type: object matrix: x-kubernetes-preserve-unknown-fields: true diff --git a/charts/argo/argo-cd/templates/dex/deployment.yaml b/charts/argo/argo-cd/templates/dex/deployment.yaml index 61f3fe86a..3c293e7cc 100644 --- a/charts/argo/argo-cd/templates/dex/deployment.yaml +++ b/charts/argo/argo-cd/templates/dex/deployment.yaml @@ -187,9 +187,19 @@ spec: {{- end }} volumes: - name: static-files + {{- if .Values.dex.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.dex.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} - name: dexconfig + {{- if .Values.dex.emptyDir.sizeLimit }} + emptyDir: + sizeLimit: {{ .Values.dex.emptyDir.sizeLimit }} + {{- else }} emptyDir: {} + {{- end }} - name: argocd-dex-server-tls secret: secretName: argocd-dex-server-tls diff --git a/charts/argo/argo-cd/values.yaml b/charts/argo/argo-cd/values.yaml index a281d0197..1285eccf7 100644 --- a/charts/argo/argo-cd/values.yaml +++ b/charts/argo/argo-cd/values.yaml @@ -673,6 +673,13 @@ controller: # - name: custom-tools # emptyDir: {} + ## Application controller emptyDir volumes + emptyDir: + # -- EmptyDir size limit for application controller + # @default -- `""` (defaults not set if not specified i.e. 
no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + # -- Annotations for the application controller StatefulSet statefulsetAnnotations: {} @@ -989,6 +996,13 @@ dex: # -- Additional volumes to the dex pod volumes: [] + ## Dex server emptyDir volumes + emptyDir: + # -- EmptyDir size limit for Dex server + # @default -- `""` (defaults not set if not specified i.e. no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + # TLS certificate configuration via Secret ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-dex-server ## Note: Issuing certificates via cert-manager in not supported right now because it's not possible to restart Dex automatically without extra controllers. @@ -1744,6 +1758,13 @@ server: # - name: custom-tools # emptyDir: {} + ## Argo CD server emptyDir volumes + emptyDir: + # -- EmptyDir size limit for the Argo CD server + # @default -- `""` (defaults not set if not specified i.e. no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + # -- Annotations to be added to server Deployment deploymentAnnotations: {} @@ -2194,6 +2215,14 @@ server: # -- Termination policy of Openshift Route termination_policy: None + ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource. + ## Defaults to off + clusterRoleRules: + # -- Enable custom rules for the server's ClusterRole resource + enabled: false + # -- List of custom rules for the server's ClusterRole resource + rules: [] + ## Repo Server repoServer: # -- Repo server name @@ -2358,6 +2387,13 @@ repoServer: # persistentVolumeClaim: # claimName: pvc-argocd-repo-server-plugins + ## RepoServer emptyDir volumes + emptyDir: + # -- EmptyDir size limit for repo server + # @default -- `""` (defaults not set if not specified i.e. no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + # -- Toggle the usage of a ephemeral Helm working directory useEphemeralHelmWorkingDir: true @@ -2645,6 +2681,13 @@ applicationSet: # -- List of extra volumes to add extraVolumes: [] + ## ApplicationSet controller emptyDir volumes + emptyDir: + # -- EmptyDir size limit for applicationSet controller + # @default -- `""` (defaults not set if not specified i.e. 
no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + ## Metrics service configuration metrics: # -- Deploy metrics service diff --git a/charts/bitnami/cassandra/Chart.yaml b/charts/bitnami/cassandra/Chart.yaml index be74e8382..a2b7a07e3 100644 --- a/charts/bitnami/cassandra/Chart.yaml +++ b/charts/bitnami/cassandra/Chart.yaml @@ -6,11 +6,11 @@ annotations: category: Database images: | - name: cassandra - image: docker.io/bitnami/cassandra:4.1.4-debian-12-r4 + image: docker.io/bitnami/cassandra:4.1.4-debian-12-r5 - name: cassandra-exporter - image: docker.io/bitnami/cassandra-exporter:2.3.8-debian-12-r17 + image: docker.io/bitnami/cassandra-exporter:2.3.8-debian-12-r18 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r16 + image: docker.io/bitnami/os-shell:12-debian-12-r17 licenses: Apache-2.0 apiVersion: v2 appVersion: 4.1.4 @@ -35,4 +35,4 @@ maintainers: name: cassandra sources: - https://github.com/bitnami/charts/tree/main/bitnami/cassandra -version: 11.0.0 +version: 11.0.1 diff --git a/charts/bitnami/cassandra/values.yaml b/charts/bitnami/cassandra/values.yaml index f9d102564..02a9a0fc0 100644 --- a/charts/bitnami/cassandra/values.yaml +++ b/charts/bitnami/cassandra/values.yaml @@ -82,7 +82,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/cassandra - tag: 4.1.4-debian-12-r4 + tag: 4.1.4-debian-12-r5 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -678,7 +678,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r16 + tag: 12-debian-12-r17 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -746,7 +746,7 @@ metrics: image: registry: docker.io repository: bitnami/cassandra-exporter - tag: 2.3.8-debian-12-r17 + tag: 2.3.8-debian-12-r18 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
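Usage note for the argo-cd 6.7.8 changes above: the new `emptyDir.sizeLimit` keys (controller, repoServer, server, dex, applicationSet) and the new `server.clusterRoleRules` block are plain chart values. Below is a minimal sketch of a values override; the `1Gi` figure mirrors the commented example in values.yaml, while the RBAC rule and the `application.namespaces` value are illustrative placeholders rather than chart defaults.

```yaml
# values-override.yaml (illustrative sketch, not shipped by the chart)
controller:
  emptyDir:
    sizeLimit: "1Gi"        # argocd-home emptyDir is rendered with sizeLimit: 1Gi
repoServer:
  emptyDir:
    sizeLimit: "1Gi"        # helm-working-dir, plugins, var-files, tmp and gpg-keyring volumes
server:
  emptyDir:
    sizeLimit: "1Gi"        # extensions (if enabled), plugins-home and tmp volumes
  clusterRoleRules:
    enabled: true           # when enabled, these rules replace the chart's default ClusterRole rules
    rules:                  # illustrative rule set
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["list"]
dex:
  emptyDir:
    sizeLimit: "1Gi"        # static-files and dexconfig volumes
applicationSet:
  emptyDir:
    sizeLimit: "1Gi"        # gpg-keyring and tmp volumes
configs:
  params:
    application.namespaces: "team-apps"   # illustrative; the conditional RBAC additions in the ClusterRole templates above key off this param
```

When `sizeLimit` is left at its default empty string, each of these templates falls back to the plain `emptyDir: {}` it rendered before, as the hunks above show.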
diff --git a/charts/bitnami/kafka/Chart.lock b/charts/bitnami/kafka/Chart.lock index bd5bc6dad..6cb6959f2 100644 --- a/charts/bitnami/kafka/Chart.lock +++ b/charts/bitnami/kafka/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: zookeeper repository: oci://registry-1.docker.io/bitnamicharts - version: 12.11.1 + version: 13.0.1 - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.18.0 -digest: sha256:45e9e003da296d6f4d54e86584f77c90f91744427321717b4b7cb3873dd89ea0 -generated: "2024-03-05T14:17:52.910919633+01:00" + version: 2.19.0 +digest: sha256:7372949fd4cf0b3c5bd39e6c5024a59a880cd3f1a02ef6da1c8910c01e4b7e62 +generated: "2024-03-25T18:32:15.845585148+01:00" diff --git a/charts/bitnami/kafka/Chart.yaml b/charts/bitnami/kafka/Chart.yaml index 53b36b18c..83c15617d 100644 --- a/charts/bitnami/kafka/Chart.yaml +++ b/charts/bitnami/kafka/Chart.yaml @@ -22,7 +22,7 @@ dependencies: - condition: zookeeper.enabled name: zookeeper repository: file://./charts/zookeeper - version: 12.x.x + version: 13.x.x - name: common repository: file://./charts/common tags: @@ -45,4 +45,4 @@ maintainers: name: kafka sources: - https://github.com/bitnami/charts/tree/main/bitnami/kafka -version: 27.1.2 +version: 28.0.1 diff --git a/charts/bitnami/kafka/README.md b/charts/bitnami/kafka/README.md index 5d39aa298..2babdbb66 100644 --- a/charts/bitnami/kafka/README.md +++ b/charts/bitnami/kafka/README.md @@ -42,698 +42,6 @@ These commands deploy Kafka on the Kubernetes cluster in the default configurati > **Tip**: List all releases using `helm list` -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Parameters - -### Global parameters - -| Name | Description | Value | -| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -| `global.imageRegistry` | Global Docker image registry | `""` | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | -| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` | - -### Common parameters - -| Name | Description | Value | -| ------------------------- | --------------------------------------------------------------------------------------- | --------------- | -| `kubeVersion` | Override Kubernetes version | `""` | -| `nameOverride` | String to partially override common.names.fullname | `""` | -| `fullnameOverride` | String to fully override common.names.fullname | `""` | -| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | -| `commonLabels` | Labels to add to all deployed objects | `{}` | -| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | -| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | -| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | -| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | -| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | -| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | - -### Kafka parameters - -| Name | Description | Value | -| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | -| `image.registry` | Kafka image registry | `REGISTRY_NAME` | -| `image.repository` | Kafka image repository | `REPOSITORY_NAME/kafka` | -| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `image.debug` | Specify if debug values should be set | `false` | -| `extraInit` | Additional content for the kafka init script, rendered as a template. | `""` | -| `config` | Configuration file for Kafka, rendered as a template. Auto-generated based on chart values when not specified. | `""` | -| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` | -| `extraConfig` | Additional configuration to be appended at the end of the generated Kafka configuration file. | `""` | -| `secretConfig` | Additional configuration to be appended at the end of the generated Kafka configuration file. | `""` | -| `existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka configuration file | `""` | -| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` | -| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` | -| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` | -| `interBrokerProtocolVersion` | Override the setting 'inter.broker.protocol.version' during the ZK migration. | `""` | -| `listeners.client.name` | Name for the Kafka client listener | `CLIENT` | -| `listeners.client.containerPort` | Port for the Kafka client listener | `9092` | -| `listeners.client.protocol` | Security protocol for the Kafka client listener. 
Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | -| `listeners.client.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | -| `listeners.controller.name` | Name for the Kafka controller listener | `CONTROLLER` | -| `listeners.controller.containerPort` | Port for the Kafka controller listener | `9093` | -| `listeners.controller.protocol` | Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | -| `listeners.controller.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | -| `listeners.interbroker.name` | Name for the Kafka inter-broker listener | `INTERNAL` | -| `listeners.interbroker.containerPort` | Port for the Kafka inter-broker listener | `9094` | -| `listeners.interbroker.protocol` | Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | -| `listeners.interbroker.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | -| `listeners.external.containerPort` | Port for the Kafka external listener | `9095` | -| `listeners.external.protocol` | Security protocol for the Kafka external listener. . Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | -| `listeners.external.name` | Name for the Kafka external listener | `EXTERNAL` | -| `listeners.external.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | -| `listeners.extraListeners` | Array of listener objects to be appended to already existing listeners | `[]` | -| `listeners.overrideListeners` | Overrides the Kafka 'listeners' configuration setting. | `""` | -| `listeners.advertisedListeners` | Overrides the Kafka 'advertised.listener' configuration setting. | `""` | -| `listeners.securityProtocolMap` | Overrides the Kafka 'security.protocol.map' configuration setting. | `""` | - -### Kafka SASL parameters - -| Name | Description | Value | -| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | -| `sasl.enabledMechanisms` | Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER` | `PLAIN,SCRAM-SHA-256,SCRAM-SHA-512` | -| `sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `PLAIN` | -| `sasl.controllerMechanism` | SASL mechanism for controller communications. 
| `PLAIN` | -| `sasl.oauthbearer.tokenEndpointUrl` | The URL for the OAuth/OIDC identity provider | `""` | -| `sasl.oauthbearer.jwksEndpointUrl` | The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved | `""` | -| `sasl.oauthbearer.expectedAudience` | The comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences | `""` | -| `sasl.oauthbearer.subClaimName` | The OAuth claim name for the subject. | `sub` | -| `sasl.interbroker.user` | Username for inter-broker communications when SASL is enabled | `inter_broker_user` | -| `sasl.interbroker.password` | Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. | `""` | -| `sasl.interbroker.clientId` | Client ID for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER | `inter_broker_client` | -| `sasl.interbroker.clientSecret` | Client Secret for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the controller listener, a random secret will be generated. | `""` | -| `sasl.controller.user` | Username for controller communications when SASL is enabled | `controller_user` | -| `sasl.controller.password` | Password for controller communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. | `""` | -| `sasl.controller.clientId` | Client ID for controller communications when SASL is enabled with mechanism OAUTHBEARER | `controller_broker_client` | -| `sasl.controller.clientSecret` | Client Secret for controller communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the inter-broker listener, a random secret will be generated. | `""` | -| `sasl.client.users` | Comma-separated list of usernames for client communications when SASL is enabled | `["user1"]` | -| `sasl.client.passwords` | Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users | `""` | -| `sasl.zookeeper.user` | Username for zookeeper communications when SASL is enabled. | `""` | -| `sasl.zookeeper.password` | Password for zookeeper communications when SASL is enabled. | `""` | -| `sasl.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser, controllerUser and zookeeperUser | `""` | - -### Kafka TLS parameters - -| Name | Description | Value | -| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM` | `JKS` | -| `tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` | -| `tls.existingSecret` | Name of the existing secret containing the TLS certificates for the Kafka nodes. | `""` | -| `tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `tls.type` is `PEM` | `false` | -| `tls.passwordsSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. 
(`key`: `password`) | `""` | -| `tls.passwordsSecretKeystoreKey` | The secret key from the tls.passwordsSecret containing the password for the Keystore. | `keystore-password` | -| `tls.passwordsSecretTruststoreKey` | The secret key from the tls.passwordsSecret containing the password for the Truststore. | `truststore-password` | -| `tls.passwordsSecretPemPasswordKey` | The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'. | `""` | -| `tls.keystorePassword` | Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | -| `tls.truststorePassword` | Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | -| `tls.keyPassword` | Password to access the PEM key when it is password-protected. | `""` | -| `tls.jksKeystoreKey` | The secret key from the `tls.existingSecret` containing the keystore | `""` | -| `tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the one in the `tls.existingSecret` | `""` | -| `tls.jksTruststoreKey` | The secret key from the `tls.existingSecret` or `tls.jksTruststoreSecret` containing the truststore | `""` | -| `tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | -| `tls.sslClientAuth` | Sets the default value for the ssl.client.auth Kafka setting. | `required` | -| `tls.zookeeper.enabled` | Enable TLS for Zookeeper client connections. | `false` | -| `tls.zookeeper.verifyHostname` | Hostname validation. | `true` | -| `tls.zookeeper.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications. | `""` | -| `tls.zookeeper.existingSecretKeystoreKey` | The secret key from the tls.zookeeper.existingSecret containing the Keystore. | `zookeeper.keystore.jks` | -| `tls.zookeeper.existingSecretTruststoreKey` | The secret key from the tls.zookeeper.existingSecret containing the Truststore. | `zookeeper.truststore.jks` | -| `tls.zookeeper.passwordsSecret` | Existing secret containing Keystore and Truststore passwords. | `""` | -| `tls.zookeeper.passwordsSecretKeystoreKey` | The secret key from the tls.zookeeper.passwordsSecret containing the password for the Keystore. | `keystore-password` | -| `tls.zookeeper.passwordsSecretTruststoreKey` | The secret key from the tls.zookeeper.passwordsSecret containing the password for the Truststore. | `truststore-password` | -| `tls.zookeeper.keystorePassword` | Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | -| `tls.zookeeper.truststorePassword` | Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. 
| `""` | -| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | -| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | -| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | -| `initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | -| `dnsPolicy` | Specifies the DNS policy for the zookeeper pods | `""` | -| `dnsConfig` | allows users more control on the DNS settings for a Pod. Required if `dnsPolicy` is set to `None` | `{}` | - -### Controller-eligible statefulset parameters - -| Name | Description | Value | -| -------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | -| `controller.replicaCount` | Number of Kafka controller-eligible nodes | `3` | -| `controller.controllerOnly` | If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. | `false` | -| `controller.minId` | Minimal node.id values for controller-eligible nodes. Do not change after first initialization. | `0` | -| `controller.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | -| `controller.config` | Configuration file for Kafka controller-eligible nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` | -| `controller.existingConfigmap` | ConfigMap with Kafka Configuration for controller-eligible nodes. | `""` | -| `controller.extraConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` | -| `controller.secretConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` | -| `controller.existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka controller-eligible nodes configuration file | `""` | -| `controller.heapOpts` | Kafka Java Heap size for controller-eligible nodes | `-Xmx1024m -Xms1024m` | -| `controller.command` | Override Kafka container command | `[]` | -| `controller.args` | Override Kafka container arguments | `[]` | -| `controller.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | -| `controller.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `controller.extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `controller.extraContainerPorts` | Kafka controller-eligible extra containerPorts. 
| `[]` | -| `controller.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | -| `controller.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | -| `controller.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `controller.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `controller.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `controller.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `controller.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | -| `controller.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `controller.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `controller.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `controller.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `controller.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `controller.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | -| `controller.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `controller.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `controller.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `controller.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `controller.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `controller.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `controller.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `controller.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `controller.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | -| `controller.initContainerResources.limits` | The resources limits for the init container | `{}` | -| `controller.initContainerResources.requests` | The requested resources for the init container | `{}` | -| `controller.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
| `none` | -| `controller.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `controller.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `controller.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `controller.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `controller.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `controller.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | -| `controller.podSecurityContext.seccompProfile.type` | Set Kafka pods's Security Context seccomp profile | `RuntimeDefault` | -| `controller.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | -| `controller.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `controller.containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` | -| `controller.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | -| `controller.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | -| `controller.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | -| `controller.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | -| `controller.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `controller.hostAliases` | Kafka pods host aliases | `[]` | -| `controller.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | -| `controller.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | -| `controller.podLabels` | Extra labels for Kafka pods | `{}` | -| `controller.podAnnotations` | Extra annotations for Kafka pods | `{}` | -| `controller.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `controller.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `controller.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `controller.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | -| `controller.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `controller.affinity` | Affinity for pod assignment | `{}` | -| `controller.nodeSelector` | Node labels for pod assignment | `{}` | -| `controller.tolerations` | Tolerations for pod assignment | `[]` | -| `controller.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `controller.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | -| `controller.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | -| `controller.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | -| `controller.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | -| `controller.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | -| `controller.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `controller.schedulerName` | Name of the k8s scheduler (other than default) | `""` | -| `controller.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | -| `controller.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | -| `controller.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | -| `controller.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | -| `controller.initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | -| `controller.pdb.create` | Deploy a pdb object for the Kafka pod | `false` | -| `controller.pdb.minAvailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | -| `controller.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | -| `controller.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | -| `controller.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `controller.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | -| `controller.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `controller.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | -| `controller.persistence.annotations` | Annotations for the PVC | `{}` | -| `controller.persistence.labels` | Labels for the PVC | `{}` | -| `controller.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `controller.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | -| `controller.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | -| `controller.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `controller.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | -| `controller.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `controller.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | -| `controller.logPersistence.annotations` | Annotations for the PVC | `{}` | -| `controller.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `controller.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | - -### Broker-only statefulset parameters - -| Name | Description | Value | -| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | -| `broker.replicaCount` | Number of Kafka broker-only nodes | `0` | -| `broker.minId` | Minimal node.id values for broker-only nodes. Do not change after first initialization. | `100` | -| `broker.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | -| `broker.config` | Configuration file for Kafka broker-only nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` | -| `broker.existingConfigmap` | ConfigMap with Kafka Configuration for broker-only nodes. | `""` | -| `broker.extraConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` | -| `broker.secretConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` | -| `broker.existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka broker-only nodes configuration file | `""` | -| `broker.heapOpts` | Kafka Java Heap size for broker-only nodes | `-Xmx1024m -Xms1024m` | -| `broker.command` | Override Kafka container command | `[]` | -| `broker.args` | Override Kafka container arguments | `[]` | -| `broker.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | -| `broker.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `broker.extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `broker.extraContainerPorts` | Kafka broker-only extra containerPorts. 
| `[]` | -| `broker.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | -| `broker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | -| `broker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `broker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `broker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `broker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `broker.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | -| `broker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `broker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `broker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `broker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `broker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `broker.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | -| `broker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `broker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `broker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `broker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `broker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `broker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `broker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `broker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `broker.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | -| `broker.initContainerResources.limits` | The resources limits for the container | `{}` | -| `broker.initContainerResources.requests` | The requested resources for the container | `{}` | -| `broker.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production). 
| `none` | -| `broker.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `broker.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `broker.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `broker.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `broker.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `broker.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | -| `broker.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` | -| `broker.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | -| `broker.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `broker.containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` | -| `broker.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | -| `broker.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | -| `broker.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | -| `broker.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | -| `broker.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `broker.hostAliases` | Kafka pods host aliases | `[]` | -| `broker.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | -| `broker.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | -| `broker.podLabels` | Extra labels for Kafka pods | `{}` | -| `broker.podAnnotations` | Extra annotations for Kafka pods | `{}` | -| `broker.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `broker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `broker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `broker.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | -| `broker.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `broker.affinity` | Affinity for pod assignment | `{}` | -| `broker.nodeSelector` | Node labels for pod assignment | `{}` | -| `broker.tolerations` | Tolerations for pod assignment | `[]` | -| `broker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `broker.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | -| `broker.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | -| `broker.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | -| `broker.priorityClassName` | Name of the existing priority class to be used by Kafka pods | `""` | -| `broker.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | -| `broker.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `broker.schedulerName` | Name of the k8s scheduler (other than default) | `""` | -| `broker.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | -| `broker.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | -| `broker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | -| `broker.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | -| `broker.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` | -| `broker.pdb.create` | Deploy a pdb object for the Kafka pod | `false` | -| `broker.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` | -| `broker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | -| `broker.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | -| `broker.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `broker.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | -| `broker.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `broker.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | -| `broker.persistence.annotations` | Annotations for the PVC | `{}` | -| `broker.persistence.labels` | Labels for the PVC | `{}` | -| `broker.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `broker.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | -| `broker.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | -| `broker.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `broker.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | -| `broker.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `broker.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | -| `broker.logPersistence.annotations` | Annotations for the PVC | `{}` | -| `broker.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `broker.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | - -### Traffic Exposure parameters - -| Name | Description | Value | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | -| `service.type` | Kubernetes Service type | `ClusterIP` | -| `service.ports.client` | Kafka svc port for client connections | `9092` | -| `service.ports.controller` | Kafka svc port for controller connections. It is used if "kraft.enabled: true" | `9093` | -| `service.ports.interbroker` | Kafka svc port for inter-broker connections | `9094` | -| `service.ports.external` | Kafka svc port for external connections | `9095` | -| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | -| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | -| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | -| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `service.clusterIP` | Kafka service Cluster IP | `""` | -| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | -| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | -| `service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | -| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | -| `service.annotations` | Additional custom annotations for Kafka service | `{}` | -| `service.headless.controller.annotations` | Annotations for the controller-eligible headless service. | `{}` | -| `service.headless.controller.labels` | Labels for the controller-eligible headless service. | `{}` | -| `service.headless.broker.annotations` | Annotations for the broker-only headless service. | `{}` | -| `service.headless.broker.labels` | Labels for the broker-only headless service. | `{}` | -| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | -| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | -| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `REGISTRY_NAME` | -| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `REPOSITORY_NAME/kubectl` | -| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | -| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | -| `externalAccess.autoDiscovery.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). 
This is ignored if externalAccess.autoDiscovery.resources is set (externalAccess.autoDiscovery.resources is recommended for production). | `none` | -| `externalAccess.autoDiscovery.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `externalAccess.autoDiscovery.containerSecurityContext.enabled` | Enable Kafka auto-discovery containers' Security Context | `true` | -| `externalAccess.autoDiscovery.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `externalAccess.autoDiscovery.containerSecurityContext.runAsUser` | Set Kafka auto-discovery containers' Security Context runAsUser | `1001` | -| `externalAccess.autoDiscovery.containerSecurityContext.runAsNonRoot` | Set Kafka auto-discovery containers' Security Context runAsNonRoot | `true` | -| `externalAccess.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka auto-discovery containers' Security Context allowPrivilegeEscalation | `false` | -| `externalAccess.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka auto-discovery containers' Security Context readOnlyRootFilesystem | `true` | -| `externalAccess.autoDiscovery.containerSecurityContext.capabilities.drop` | Set Kafka auto-discovery containers' Security Context capabilities to be dropped | `["ALL"]` | -| `externalAccess.autoDiscovery.containerSecurityContext.seccompProfile.type` | Set Kafka auto-discovery seccomp profile type | `RuntimeDefault` | -| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes | `false` | -| `externalAccess.controller.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | -| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.controller.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | -| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | -| `externalAccess.controller.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | -| `externalAccess.controller.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | -| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | -| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | -| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | -| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | -| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.broker.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | -| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | -| `externalAccess.broker.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | -| `externalAccess.broker.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | -| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | -| `externalAccess.broker.service.labels` | Service labels for external access | `{}` | -| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | -| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | -| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | -| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | - -### Volume Permissions parameters - -| Name | Description | Value | -| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | -| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | -| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | -| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `none` | -| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | - -### Other Parameters - -| Name | Description | Value | -| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | -| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | -| `serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | -| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | -| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | -| `rbac.create` | Whether to create & use RBAC resources or not | `false` | - -### Metrics parameters - -| Name | Description | Value | -| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | -| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | -| `metrics.kafka.image.registry` | Kafka exporter image registry | `REGISTRY_NAME` | -| `metrics.kafka.image.repository` | Kafka exporter image repository | `REPOSITORY_NAME/kafka-exporter` | -| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | -| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | -| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | -| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | -| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | -| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | -| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | -| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | -| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | -| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | -| `metrics.kafka.livenessProbe.enabled` | Enable livenessProbe | `true` | -| `metrics.kafka.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | -| `metrics.kafka.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `metrics.kafka.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | -| `metrics.kafka.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `metrics.kafka.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `metrics.kafka.readinessProbe.enabled` | Enable readinessProbe | `true` | -| `metrics.kafka.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `metrics.kafka.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | -| `metrics.kafka.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | -| `metrics.kafka.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | 
`3` | -| `metrics.kafka.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `metrics.kafka.startupProbe.enabled` | Enable startupProbe | `false` | -| `metrics.kafka.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | -| `metrics.kafka.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | -| `metrics.kafka.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `metrics.kafka.startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | -| `metrics.kafka.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `metrics.kafka.customStartupProbe` | Override default startup probe | `{}` | -| `metrics.kafka.customLivenessProbe` | Override default liveness probe | `{}` | -| `metrics.kafka.customReadinessProbe` | Override default readiness probe | `{}` | -| `metrics.kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.kafka.resources is set (metrics.kafka.resources is recommended for production). | `none` | -| `metrics.kafka.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `metrics.kafka.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `metrics.kafka.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `metrics.kafka.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | -| `metrics.kafka.podSecurityContext.seccompProfile.type` | Set Kafka exporter pod's Security Context seccomp profile | `RuntimeDefault` | -| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | -| `metrics.kafka.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `metrics.kafka.containerSecurityContext.runAsUser` | Set Kafka exporter containers' Security Context runAsUser | `1001` | -| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | -| `metrics.kafka.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka exporter containers' Security Context allowPrivilegeEscalation | `false` | -| `metrics.kafka.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka exporter containers' Security Context readOnlyRootFilesystem | `true` | -| `metrics.kafka.containerSecurityContext.capabilities.drop` | Set Kafka exporter containers' Security Context capabilities to be dropped | `["ALL"]` | -| `metrics.kafka.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | -| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | -| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | -| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | -| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | -| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. | `[]` | -| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | -| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | -| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | -| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | -| `metrics.kafka.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | -| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | -| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | -| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | -| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | -| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | -| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | -| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | -| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | -| `metrics.kafka.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | -| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | -| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | -| `metrics.jmx.kafkaJmxPort` | JMX port where the exporter will collect metrics, exposed in the Kafka container. | `5555` | -| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | -| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | -| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | -| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | -| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | -| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | -| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation | `false` | -| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem | `true` | -| `metrics.jmx.containerSecurityContext.capabilities.drop` | Set Prometheus JMX exporter containers' Security Context capabilities to be dropped | `["ALL"]` | -| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | -| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). | `none` | -| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | -| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | -| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | -| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | -| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | -| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | -| 
`metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | -| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | -| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | -| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | -| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | - -### Kafka provisioning parameters - -| Name | Description | Value | -| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------- | -| `provisioning.enabled` | Enable kafka provisioning Job | `false` | -| `provisioning.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | -| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | -| `provisioning.topics` | Kafka topics to provision | `[]` | -| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | -| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | -| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | -| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | -| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | -| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | -| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. | `jks` | -| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | -| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | -| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | -| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | -| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | -| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | -| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. 
| `""` | -| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | -| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | -| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | -| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | -| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | -| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | -| `provisioning.command` | Override provisioning container command | `[]` | -| `provisioning.args` | Override provisioning container arguments | `[]` | -| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | -| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | -| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | -| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `true` | -| `provisioning.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | -| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | -| `provisioning.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production). 
| `none` | -| `provisioning.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `provisioning.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `provisioning.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `provisioning.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | -| `provisioning.podSecurityContext.seccompProfile.type` | Set Kafka provisioning pod's Security Context seccomp profile | `RuntimeDefault` | -| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | -| `provisioning.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `provisioning.containerSecurityContext.runAsUser` | Set Kafka provisioning containers' Security Context runAsUser | `1001` | -| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | -| `provisioning.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka provisioning containers' Security Context allowPrivilegeEscalation | `false` | -| `provisioning.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka provisioning containers' Security Context readOnlyRootFilesystem | `true` | -| `provisioning.containerSecurityContext.capabilities.drop` | Set Kafka provisioning containers' Security Context capabilities to be dropped | `["ALL"]` | -| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | -| `provisioning.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | -| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | -| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | -| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | -| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | -| `provisioning.waitForKafka` | If true use an init container to wait until kafka is ready before starting provisioning | `true` | -| `provisioning.useHelmHooks` | Flag to indicate usage of helm hooks | `true` | - -### KRaft chart parameters - -| Name | Description | Value | -| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | -| `kraft.enabled` | Switch to enable or disable the KRaft mode for Kafka | `true` | -| `kraft.existingClusterIdSecret` | Name of the secret containing the cluster ID for the Kafka KRaft cluster. This is incompatible with the clusterId parameter. If both are set, the existingClusterIdSecret will be used | `""` | -| `kraft.clusterId` | Kafka Kraft cluster ID. If not set, a random cluster ID will be generated the first time Kraft is initialized. 
| `""` | -| `kraft.controllerQuorumVoters` | Override the Kafka controller quorum voters of the Kafka Kraft cluster. If not set, it will be automatically configured to use all controller-eligible nodes. | `""` | - -### ZooKeeper chart parameters - -| Name | Description | Value | -| ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | -| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | -| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart. Must be false if you use KRaft mode. | `false` | -| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | -| `zookeeper.auth.client.enabled` | Enable ZooKeeper auth | `false` | -| `zookeeper.auth.client.clientUser` | User that the ZooKeeper client (zkCli.sh) will use to authenticate. Must exist in the serverUsers comma-separated list. | `""` | -| `zookeeper.auth.client.clientPassword` | Password that the ZooKeeper client (zkCli.sh) will use to authenticate. Must exist in the serverPasswords comma-separated list. | `""` | -| `zookeeper.auth.client.serverUsers` | Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | -| `zookeeper.auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | -| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | -| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | -| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | -| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | -| `externalZookeeper.servers` | List of external ZooKeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. Must be empty if you use KRaft mode. | `[]` | - -```console -helm install my-release \ - --set replicaCount=3 \ - oci://REGISTRY_NAME/REPOSITORY_NAME/kafka -``` - -> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. - -The above command deploys Kafka with 3 brokers (replicas). - -Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, - -```console -helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/kafka -``` - -> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. 
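As a minimal, hypothetical sketch (the keys below are taken from the parameter tables in this README; values are only examples, adjust them to your environment), such a `values.yaml` might look like:

```yaml
# Example values.yaml overriding a few documented parameters
controller:
  replicaCount: 3        # number of controller-eligible nodes
metrics:
  jmx:
    enabled: true        # expose JMX metrics for Prometheus scraping
```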
-> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/kafka/values.yaml) - ## Configuration and installation details ### Resource requests and limits @@ -966,9 +274,10 @@ You can use the following values to generate External-DNS annotations which auto ```yaml externalAccess: - service: - annotations: - external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com" + controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com" ``` ### Enable metrics @@ -1109,88 +418,715 @@ As an alternative, this chart supports using an initContainer to change the owne You can enable this initContainer by setting `volumePermissions.enabled` to `true`. +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | + +### Common parameters + +| Name | Description | Value | +| ------------------------- | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + +### Kafka parameters + +| Name | Description | Value | +| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `image.registry` | Kafka image registry | `REGISTRY_NAME` | +| `image.repository` | Kafka image repository | `REPOSITORY_NAME/kafka` | +| `image.digest` | Kafka image digest 
in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `extraInit` | Additional content for the Kafka init script, rendered as a template. | `""` | +| `config` | Configuration file for Kafka, rendered as a template. Auto-generated based on chart values when not specified. | `""` | +| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` | +| `extraConfig` | Additional configuration to be appended at the end of the generated Kafka configuration file. | `""` | +| `secretConfig` | Additional configuration to be appended at the end of the generated Kafka configuration file. | `""` | +| `existingSecretConfig` | Secret with additional configuration that will be appended to the end of the generated Kafka configuration file | `""` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` | +| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` | +| `interBrokerProtocolVersion` | Override the setting 'inter.broker.protocol.version' during the ZK migration. | `""` | +| `listeners.client.name` | Name for the Kafka client listener | `CLIENT` | +| `listeners.client.containerPort` | Port for the Kafka client listener | `9092` | +| `listeners.client.protocol` | Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.client.sslClientAuth` | Optional. If SASL_SSL is enabled, configure the mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.controller.name` | Name for the Kafka controller listener | `CONTROLLER` | +| `listeners.controller.containerPort` | Port for the Kafka controller listener | `9093` | +| `listeners.controller.protocol` | Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.controller.sslClientAuth` | Optional. If SASL_SSL is enabled, configure the mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.interbroker.name` | Name for the Kafka inter-broker listener | `INTERNAL` | +| `listeners.interbroker.containerPort` | Port for the Kafka inter-broker listener | `9094` | +| `listeners.interbroker.protocol` | Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.interbroker.sslClientAuth` | Optional. If SASL_SSL is enabled, configure the mTLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.external.containerPort` | Port for the Kafka external listener | `9095` | +| `listeners.external.protocol` | Security protocol for the Kafka external listener. 
Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' | `SASL_PLAINTEXT` | +| `listeners.external.name` | Name for the Kafka external listener | `EXTERNAL` | +| `listeners.external.sslClientAuth` | Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.sslClientAuth for this listener. Allowed values are 'none', 'requested' and 'required' | `""` | +| `listeners.extraListeners` | Array of listener objects to be appended to already existing listeners | `[]` | +| `listeners.overrideListeners` | Overrides the Kafka 'listeners' configuration setting. | `""` | +| `listeners.advertisedListeners` | Overrides the Kafka 'advertised.listener' configuration setting. | `""` | +| `listeners.securityProtocolMap` | Overrides the Kafka 'security.protocol.map' configuration setting. | `""` | + +### Kafka SASL parameters + +| Name | Description | Value | +| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `sasl.enabledMechanisms` | Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER` | `PLAIN,SCRAM-SHA-256,SCRAM-SHA-512` | +| `sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `PLAIN` | +| `sasl.controllerMechanism` | SASL mechanism for controller communications. | `PLAIN` | +| `sasl.oauthbearer.tokenEndpointUrl` | The URL for the OAuth/OIDC identity provider | `""` | +| `sasl.oauthbearer.jwksEndpointUrl` | The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved | `""` | +| `sasl.oauthbearer.expectedAudience` | The comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences | `""` | +| `sasl.oauthbearer.subClaimName` | The OAuth claim name for the subject. | `sub` | +| `sasl.interbroker.user` | Username for inter-broker communications when SASL is enabled | `inter_broker_user` | +| `sasl.interbroker.password` | Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. | `""` | +| `sasl.interbroker.clientId` | Client ID for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER | `inter_broker_client` | +| `sasl.interbroker.clientSecret` | Client Secret for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the controller listener, a random secret will be generated. | `""` | +| `sasl.controller.user` | Username for controller communications when SASL is enabled | `controller_user` | +| `sasl.controller.password` | Password for controller communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. | `""` | +| `sasl.controller.clientId` | Client ID for controller communications when SASL is enabled with mechanism OAUTHBEARER | `controller_broker_client` | +| `sasl.controller.clientSecret` | Client Secret for controller communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the inter-broker listener, a random secret will be generated. 
| `""` | +| `sasl.client.users` | Comma-separated list of usernames for client communications when SASL is enabled | `["user1"]` | +| `sasl.client.passwords` | Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users | `""` | +| `sasl.zookeeper.user` | Username for zookeeper communications when SASL is enabled. | `""` | +| `sasl.zookeeper.password` | Password for zookeeper communications when SASL is enabled. | `""` | +| `sasl.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser, controllerUser and zookeeperUser | `""` | + +### Kafka TLS parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM` | `JKS` | +| `tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` | +| `tls.existingSecret` | Name of the existing secret containing the TLS certificates for the Kafka nodes. | `""` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `tls.type` is `PEM` | `false` | +| `tls.passwordsSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) | `""` | +| `tls.passwordsSecretKeystoreKey` | The secret key from the tls.passwordsSecret containing the password for the Keystore. | `keystore-password` | +| `tls.passwordsSecretTruststoreKey` | The secret key from the tls.passwordsSecret containing the password for the Truststore. | `truststore-password` | +| `tls.passwordsSecretPemPasswordKey` | The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'. | `""` | +| `tls.keystorePassword` | Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `tls.truststorePassword` | Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `tls.keyPassword` | Password to access the PEM key when it is password-protected. | `""` | +| `tls.jksKeystoreKey` | The secret key from the `tls.existingSecret` containing the keystore | `""` | +| `tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the one in the `tls.existingSecret` | `""` | +| `tls.jksTruststoreKey` | The secret key from the `tls.existingSecret` or `tls.jksTruststoreSecret` containing the truststore | `""` | +| `tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `tls.sslClientAuth` | Sets the default value for the ssl.client.auth Kafka setting. | `required` | +| `tls.zookeeper.enabled` | Enable TLS for Zookeeper client connections. | `false` | +| `tls.zookeeper.verifyHostname` | Hostname validation. | `true` | +| `tls.zookeeper.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications. | `""` | +| `tls.zookeeper.existingSecretKeystoreKey` | The secret key from the tls.zookeeper.existingSecret containing the Keystore. 
| `zookeeper.keystore.jks` | +| `tls.zookeeper.existingSecretTruststoreKey` | The secret key from the tls.zookeeper.existingSecret containing the Truststore. | `zookeeper.truststore.jks` | +| `tls.zookeeper.passwordsSecret` | Existing secret containing Keystore and Truststore passwords. | `""` | +| `tls.zookeeper.passwordsSecretKeystoreKey` | The secret key from the tls.zookeeper.passwordsSecret containing the password for the Keystore. | `keystore-password` | +| `tls.zookeeper.passwordsSecretTruststoreKey` | The secret key from the tls.zookeeper.passwordsSecret containing the password for the Truststore. | `truststore-password` | +| `tls.zookeeper.keystorePassword` | Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `tls.zookeeper.truststorePassword` | Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. | `""` | +| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | +| `dnsPolicy` | Specifies the DNS policy for the zookeeper pods | `""` | +| `dnsConfig` | allows users more control on the DNS settings for a Pod. Required if `dnsPolicy` is set to `None` | `{}` | + +### Controller-eligible statefulset parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `controller.replicaCount` | Number of Kafka controller-eligible nodes | `3` | +| `controller.controllerOnly` | If set to true, controller nodes will be deployed as dedicated controllers, instead of controller+broker processes. | `false` | +| `controller.minId` | Minimal node.id values for controller-eligible nodes. Do not change after first initialization. | `0` | +| `controller.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | +| `controller.config` | Configuration file for Kafka controller-eligible nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` | +| `controller.existingConfigmap` | ConfigMap with Kafka Configuration for controller-eligible nodes. | `""` | +| `controller.extraConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. | `""` | +| `controller.secretConfig` | Additional configuration to be appended at the end of the generated Kafka controller-eligible nodes configuration file. 
| `""` | +| `controller.existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka controller-eligible nodes configuration file | `""` | +| `controller.heapOpts` | Kafka Java Heap size for controller-eligible nodes | `-Xmx1024m -Xms1024m` | +| `controller.command` | Override Kafka container command | `[]` | +| `controller.args` | Override Kafka container arguments | `[]` | +| `controller.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `controller.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `controller.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `controller.extraContainerPorts` | Kafka controller-eligible extra containerPorts. | `[]` | +| `controller.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `controller.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `controller.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `controller.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `controller.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `controller.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `controller.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `controller.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `controller.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `controller.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `controller.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `controller.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `controller.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `controller.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `controller.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `controller.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `controller.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `controller.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `controller.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `controller.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `controller.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `controller.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `controller.initContainerResources.limits` | The resources limits for the init container | `{}` | +| `controller.initContainerResources.requests` | The requested resources for the init container | `{}` | +| `controller.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
| `small` | +| `controller.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `controller.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `controller.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `controller.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `controller.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `controller.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `controller.podSecurityContext.seccompProfile.type` | Set Kafka pods's Security Context seccomp profile | `RuntimeDefault` | +| `controller.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| `controller.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `controller.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `controller.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `controller.containerSecurityContext.runAsGroup` | Set Kafka containers' Security Context runAsGroup | `1001` | +| `controller.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `controller.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | +| `controller.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | +| `controller.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | +| `controller.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `controller.hostAliases` | Kafka pods host aliases | `[]` | +| `controller.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `controller.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `controller.podLabels` | Extra labels for Kafka pods | `{}` | +| `controller.podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `controller.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `controller.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `controller.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `controller.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `controller.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `controller.affinity` | Affinity for pod assignment | `{}` | +| `controller.nodeSelector` | Node labels for pod assignment | `{}` | +| `controller.tolerations` | Tolerations for pod assignment | `[]` | +| `controller.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `controller.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `controller.podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `controller.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | +| `controller.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `controller.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `controller.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `controller.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `controller.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `controller.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `controller.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `controller.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `controller.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` | +| `controller.pdb.create` | Deploy a pdb object for the Kafka pod | `false` | +| `controller.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` | +| `controller.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | +| `controller.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `controller.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `controller.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `controller.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `controller.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `controller.persistence.annotations` | Annotations for the PVC | `{}` | +| `controller.persistence.labels` | Labels for the PVC | `{}` | +| `controller.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `controller.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `controller.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `controller.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `controller.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `controller.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `controller.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `controller.logPersistence.annotations` | Annotations for the PVC | `{}` | +| `controller.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `controller.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + +### Broker-only statefulset parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | +| `broker.replicaCount` | Number of Kafka broker-only nodes | `0` | +| `broker.minId` | Minimal node.id values for broker-only nodes. Do not change after first initialization. | `100` | +| `broker.zookeeperMigrationMode` | Set to true to deploy cluster controller quorum | `false` | +| `broker.config` | Configuration file for Kafka broker-only nodes, rendered as a template. Auto-generated based on chart values when not specified. | `""` | +| `broker.existingConfigmap` | ConfigMap with Kafka Configuration for broker-only nodes. | `""` | +| `broker.extraConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` | +| `broker.secretConfig` | Additional configuration to be appended at the end of the generated Kafka broker-only nodes configuration file. | `""` | +| `broker.existingSecretConfig` | Secret with additonal configuration that will be appended to the end of the generated Kafka broker-only nodes configuration file | `""` | +| `broker.heapOpts` | Kafka Java Heap size for broker-only nodes | `-Xmx1024m -Xms1024m` | +| `broker.command` | Override Kafka container command | `[]` | +| `broker.args` | Override Kafka container arguments | `[]` | +| `broker.extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `broker.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `broker.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `broker.extraContainerPorts` | Kafka broker-only extra containerPorts. 
| `[]` | +| `broker.livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `broker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `broker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `broker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `broker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `broker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `broker.readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `broker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `broker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `broker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `broker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `broker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `broker.startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `broker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `broker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `broker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `broker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `broker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `broker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `broker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `broker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `broker.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `broker.initContainerResources.limits` | The resources limits for the container | `{}` | +| `broker.initContainerResources.requests` | The requested resources for the container | `{}` | +| `broker.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production). 
| `small` | +| `broker.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `broker.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `broker.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `broker.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `broker.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `broker.podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `broker.podSecurityContext.seccompProfile.type` | Set Kafka pod's Security Context seccomp profile | `RuntimeDefault` | +| `broker.containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| `broker.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `broker.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `broker.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `broker.containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `broker.containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | +| `broker.containerSecurityContext.readOnlyRootFilesystem` | Allows the pod to mount the RootFS as ReadOnly only | `true` | +| `broker.containerSecurityContext.capabilities.drop` | Set Kafka containers' server Security Context capabilities to be dropped | `["ALL"]` | +| `broker.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `broker.hostAliases` | Kafka pods host aliases | `[]` | +| `broker.hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `broker.hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `broker.podLabels` | Extra labels for Kafka pods | `{}` | +| `broker.podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `broker.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `broker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `broker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `broker.nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `broker.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `broker.affinity` | Affinity for pod assignment | `{}` | +| `broker.nodeSelector` | Node labels for pod assignment | `{}` | +| `broker.tolerations` | Tolerations for pod assignment | `[]` | +| `broker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `broker.terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `broker.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `broker.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | +| `broker.priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `broker.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `broker.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `broker.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `broker.updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `broker.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `broker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `broker.sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `broker.initContainers` | Add additional init containers to the Kafka pod(s) | `[]` | +| `broker.pdb.create` | Deploy a pdb object for the Kafka pod | `false` | +| `broker.pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` | +| `broker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | +| `broker.persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `broker.persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `broker.persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `broker.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `broker.persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `broker.persistence.annotations` | Annotations for the PVC | `{}` | +| `broker.persistence.labels` | Labels for the PVC | `{}` | +| `broker.persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `broker.persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `broker.logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `broker.logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `broker.logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `broker.logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `broker.logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `broker.logPersistence.annotations` | Annotations for the PVC | `{}` | +| `broker.logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `broker.logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + +### Traffic Exposure parameters + +| Name | Description | Value | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.controller` | Kafka svc port for controller connections. It is used if "kraft.enabled: true" | `9093` | +| `service.ports.interbroker` | Kafka svc port for inter-broker connections | `9094` | +| `service.ports.external` | Kafka svc port for external connections | `9095` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.headless.controller.annotations` | Annotations for the controller-eligible headless service. | `{}` | +| `service.headless.controller.labels` | Labels for the controller-eligible headless service. | `{}` | +| `service.headless.broker.annotations` | Annotations for the broker-only headless service. | `{}` | +| `service.headless.broker.labels` | Labels for the broker-only headless service. | `{}` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `REGISTRY_NAME` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `REPOSITORY_NAME/kubectl` | +| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). 
This is ignored if externalAccess.autoDiscovery.resources is set (externalAccess.autoDiscovery.resources is recommended for production). | `nano` | +| `externalAccess.autoDiscovery.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `externalAccess.autoDiscovery.containerSecurityContext.enabled` | Enable Kafka auto-discovery containers' Security Context | `true` | +| `externalAccess.autoDiscovery.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `externalAccess.autoDiscovery.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `externalAccess.autoDiscovery.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `externalAccess.autoDiscovery.containerSecurityContext.runAsNonRoot` | Set Kafka auto-discovery containers' Security Context runAsNonRoot | `true` | +| `externalAccess.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka auto-discovery containers' Security Context allowPrivilegeEscalation | `false` | +| `externalAccess.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka auto-discovery containers' Security Context readOnlyRootFilesystem | `true` | +| `externalAccess.autoDiscovery.containerSecurityContext.capabilities.drop` | Set Kafka auto-discovery containers' Security Context capabilities to be dropped | `["ALL"]` | +| `externalAccess.autoDiscovery.containerSecurityContext.seccompProfile.type` | Set Kafka auto-discovery seccomp profile type | `RuntimeDefault` | +| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes | `false` | +| `externalAccess.controller.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.controller.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.controller.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.controller.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | +| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.broker.service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.broker.service.usePodIPs` | using the MY_POD_IP address for external access. | `false` | +| `externalAccess.broker.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.broker.service.labels` | Service labels for external access | `{}` | +| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `nano` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `REGISTRY_NAME` | +| `metrics.kafka.image.repository` | Kafka exporter image repository | `REPOSITORY_NAME/kafka-exporter` | +| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | +| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | +| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | +| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.kafka.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.kafka.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.kafka.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `metrics.kafka.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.kafka.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.kafka.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.kafka.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.kafka.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `metrics.kafka.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.kafka.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | 
`3` | +| `metrics.kafka.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.kafka.startupProbe.enabled` | Enable startupProbe | `false` | +| `metrics.kafka.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `metrics.kafka.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `metrics.kafka.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.kafka.startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `metrics.kafka.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.kafka.customStartupProbe` | Override default startup probe | `{}` | +| `metrics.kafka.customLivenessProbe` | Override default liveness probe | `{}` | +| `metrics.kafka.customReadinessProbe` | Override default readiness probe | `{}` | +| `metrics.kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.kafka.resources is set (metrics.kafka.resources is recommended for production). | `micro` | +| `metrics.kafka.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `metrics.kafka.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `metrics.kafka.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.podSecurityContext.seccompProfile.type` | Set Kafka exporter pod's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | +| `metrics.kafka.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka exporter containers' Security Context allowPrivilegeEscalation | `false` | +| `metrics.kafka.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka exporter containers' Security Context readOnlyRootFilesystem | `true` | +| `metrics.kafka.containerSecurityContext.capabilities.drop` | Set Kafka exporter containers' Security Context capabilities to be dropped | `["ALL"]` | +| `metrics.kafka.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | +| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. | `[]` | +| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | +| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | +| `metrics.kafka.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | +| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | +| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | +| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | +| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | +| `metrics.kafka.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.kafkaJmxPort` | JMX port where the exporter will collect metrics, exposed in the Kafka container. | `5555` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation | `false` | +| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem | `true` | +| `metrics.jmx.containerSecurityContext.capabilities.drop` | Set Prometheus JMX exporter containers' Security Context capabilities to be dropped | `["ALL"]` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). | `micro` | +| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | 
MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | + +### Kafka provisioning parameters + +| Name | Description | Value | +| ---------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `provisioning.enabled` | Enable kafka provisioning Job | `false` | +| `provisioning.automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| `provisioning.topics` | Kafka topics to provision | `[]` | +| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | +| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | +| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | +| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | +| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `JKS` and `PEM`. | `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. 
| `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `true` | +| `provisioning.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `provisioning.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production). 
| `micro` | +| `provisioning.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `provisioning.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `provisioning.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.podSecurityContext.seccompProfile.type` | Set Kafka provisioning pod's Security Context seccomp profile | `RuntimeDefault` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `provisioning.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.containerSecurityContext.allowPrivilegeEscalation` | Set Kafka provisioning containers' Security Context allowPrivilegeEscalation | `false` | +| `provisioning.containerSecurityContext.readOnlyRootFilesystem` | Set Kafka provisioning containers' Security Context readOnlyRootFilesystem | `true` | +| `provisioning.containerSecurityContext.capabilities.drop` | Set Kafka provisioning containers' Security Context capabilities to be dropped | `["ALL"]` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.waitForKafka` | If true use an init container to wait until kafka is ready before starting provisioning | `true` | +| `provisioning.useHelmHooks` | Flag to indicate usage of helm hooks | `true` | + +### KRaft chart parameters + +| Name | Description | Value | +| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | +| `kraft.enabled` | Switch to enable or disable the KRaft mode for Kafka | `true` | +| `kraft.existingClusterIdSecret` | Name of the secret containing the cluster ID for the Kafka KRaft cluster. This is incompatible with the clusterId parameter. If both are set, the existingClusterIdSecret will be used | `""` | +| `kraft.clusterId` | Kafka Kraft cluster ID. 
If not set, a random cluster ID will be generated the first time Kraft is initialized. | `""` | +| `kraft.controllerQuorumVoters` | Override the Kafka controller quorum voters of the Kafka Kraft cluster. If not set, it will be automatically configured to use all controller-elegible nodes. | `""` | + +### ZooKeeper chart parameters + +| Name | Description | Value | +| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | +| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart. Must be false if you use KRaft mode. | `false` | +| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | +| `zookeeper.auth.client.enabled` | Enable ZooKeeper auth | `false` | +| `zookeeper.auth.client.clientUser` | User that will use ZooKeeper client (zkCli.sh) to authenticate. Must exist in the serverUsers comma-separated list. | `""` | +| `zookeeper.auth.client.clientPassword` | Password that will use ZooKeeper client (zkCli.sh) to authenticate. Must exist in the serverPasswords comma-separated list. | `""` | +| `zookeeper.auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | +| `zookeeper.auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | +| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | +| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | +| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | +| `externalZookeeper.servers` | List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. Must be empty if you use KRaft mode. | `[]` | + +```console +helm install my-release \ + --set replicaCount=3 \ + oci://REGISTRY_NAME/REPOSITORY_NAME/kafka +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/kafka +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/kafka/values.yaml) + ## Troubleshooting Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). 
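As a complement to the install commands above, several of the parameters documented in the KRaft and ZooKeeper tables can be combined in a single values file passed with `-f values.yaml`. The snippet below is an illustrative sketch only, not part of the chart documentation: it assumes the default KRaft mode, and the commented-out alternative shows how an existing external ZooKeeper ensemble would be referenced instead (the server hostnames are placeholders to replace with your own).

```yaml
# Illustrative sketch only: KRaft mode (the chart default)
kraft:
  enabled: true
  # Leave empty so a cluster ID is generated on first initialization,
  # or set it explicitly / use kraft.existingClusterIdSecret instead.
  clusterId: ""

# Alternative (mutually exclusive with KRaft): reuse an existing ZooKeeper ensemble.
# kraft:
#   enabled: false
# zookeeper:
#   enabled: false
# externalZookeeper:
#   servers:
#     - zk-0.my-zookeeper-headless.default.svc.cluster.local   # placeholder hostname
#     - zk-1.my-zookeeper-headless.default.svc.cluster.local   # placeholder hostname
# zookeeperChrootPath: /kafka
```

As noted in the tables above, `zookeeper.enabled` must be `false` and `externalZookeeper.servers` must be empty whenever KRaft mode is enabled.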
-## Migrating from Zookeeper (Early access) - -This guide is an adaptation from upstream documentation: [Migrate from ZooKeeper to KRaft](https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html) - -1. Retrieve the cluster ID from Zookeeper: - - ```console - $ kubectl exec -it -- zkCli.sh get /cluster/id - /opt/bitnami/java/bin/java - Connecting to localhost:2181 - - WATCHER:: - - WatchedEvent state:SyncConnected type:None path:null - {"version":"1","id":"TEr3HVPvTqSWixWRHngP5g"} - ``` - -2. Deploy at least one Kraft controller-only in your deployment and enable `zookeeperMigrationMode=true`. The Kraft controllers will migrate the data from your Kafka ZkBroker to Kraft mode. - - To do so add the following values to your Zookeeper deployment when upgrading: - - ```yaml - controller: - replicaCount: 1 - controllerOnly: true - zookeeperMigrationMode: true - # If needed, set controllers minID to avoid conflict with your ZK brokers' ids. - # minID: 0 - broker: - zookeeperMigrationMode: true - kraft: - enabled: true - clusterId: "" - ``` - -3. Wait until until all brokers are ready. You should see the following log in the broker logs: - - ```console - INFO [KafkaServer id=100] Finished catching up on KRaft metadata log, requesting that the KRaft controller unfence this broker (kafka.server.KafkaServer) - INFO [BrokerLifecycleManager id=100 isZkBroker=true] The broker has been unfenced. Transitioning from RECOVERY to RUNNING. (kafka.server.BrokerLifecycleManager) - ``` - - In the controllers, the following message should show up: - - ```console - Transitioning ZK migration state from PRE_MIGRATION to MIGRATION (org.apache.kafka.controller.FeatureControlManager) - ``` - -4. Once all brokers have been successfully migrated, set `broker.zookeeperMigrationMode=false` to fully migrate them. - - ```yaml - broker: - zookeeperMigrationMode: false - ``` - -5. To conclude the migration, switch off migration mode on controllers and stop Zookeeper: - - ```yaml - controller: - zookeeperMigrationMode: false - zookeeper: - enabled: false - ``` - - After migration is complete, you should see the following message in your controllers: - - ```console - [2023-07-13 13:07:45,226] INFO [QuorumController id=1] Transitioning ZK migration state from MIGRATION to POST_MIGRATION (org.apache.kafka.controller.FeatureControlManager) - ``` - -6. (**Optional**) If you would like to switch to a non-dedicated cluster, set `controller.controllerOnly=false`. This will cause controller-only nodes to switch to controller+broker nodes. - - At that point, you could manually decommission broker-only nodes by reassigning its partitions to controller-eligible nodes. - - For more information about decommissioning kafka broker check the [Kafka documentation](https://www.confluent.io/blog/remove-kafka-brokers-from-any-cluster-the-easy-way/). - ## Upgrading +### To 28.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. +- The `networkPolicy` section has been normalized amongst all Bitnami charts. 
Compared to the previous approach, the values section has been simplified (check the Parameters section) and now it is set to `enabled=true` by default. Egress traffic is allowed by default and ingress traffic is allowed from all pods but only to the ports set in `containerPorts` and `extraContainerPorts`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + ### To 26.0.0 This major release bumps the Kafka version to 3.6 [kafka upgrade notes](https://kafka.apache.org/36/documentation.html#upgrade). @@ -1275,7 +1211,83 @@ If upgrading from Kraft mode, existing PVCs from Kafka containers should be reat #### Upgrading from Zookeeper mode If upgrading from Zookeeper mode, make sure you set 'controller.replicaCount=0' and reattach the existing PVCs to 'broker' pods. -This will allow you to perform a migration to Kraft mode in the future by following the 'Migrating from Zookeeper' section of this documentation. +This will allow you to perform a migration to Kraft mode in the future by following the section below. + +##### Migrating from Zookeeper (Early access) + +This guide is an adaptation from upstream documentation: [Migrate from ZooKeeper to KRaft](https://docs.confluent.io/platform/current/installation/migrate-zk-kraft.html) + +1. Retrieve the cluster ID from Zookeeper: + + ```console + $ kubectl exec -it -- zkCli.sh get /cluster/id + /opt/bitnami/java/bin/java + Connecting to localhost:2181 + + WATCHER:: + + WatchedEvent state:SyncConnected type:None path:null + {"version":"1","id":"TEr3HVPvTqSWixWRHngP5g"} + ``` + +2. Deploy at least one Kraft controller-only node in your deployment and enable `zookeeperMigrationMode=true`. The Kraft controllers will migrate the data from your Kafka ZkBroker to Kraft mode. + + To do so, add the following values to your Zookeeper deployment when upgrading: + + ```yaml + controller: + replicaCount: 1 + controllerOnly: true + zookeeperMigrationMode: true + # If needed, set controllers minID to avoid conflict with your ZK brokers' ids. + # minID: 0 + broker: + zookeeperMigrationMode: true + kraft: + enabled: true + clusterId: "" + ``` + +3. Wait until all brokers are ready. You should see the following log in the broker logs: + + ```console + INFO [KafkaServer id=100] Finished catching up on KRaft metadata log, requesting that the KRaft controller unfence this broker (kafka.server.KafkaServer) + INFO [BrokerLifecycleManager id=100 isZkBroker=true] The broker has been unfenced. Transitioning from RECOVERY to RUNNING. (kafka.server.BrokerLifecycleManager) + ``` + + In the controllers, the following message should show up: + + ```console + Transitioning ZK migration state from PRE_MIGRATION to MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` + +4. Once all brokers have been successfully migrated, set `broker.zookeeperMigrationMode=false` to fully migrate them. + + ```yaml + broker: + zookeeperMigrationMode: false + ``` + +5. To conclude the migration, switch off migration mode on controllers and stop Zookeeper: + + ```yaml + controller: + zookeeperMigrationMode: false + zookeeper: + enabled: false + ``` + + After migration is complete, you should see the following message in your controllers: + + ```console + [2023-07-13 13:07:45,226] INFO [QuorumController id=1] Transitioning ZK migration state from MIGRATION to POST_MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` + +6. 
(**Optional**) If you would like to switch to a non-dedicated cluster, set `controller.controllerOnly=false`. This will cause controller-only nodes to switch to controller+broker nodes. + + At that point, you could manually decommission broker-only nodes by reassigning its partitions to controller-eligible nodes. + + For more information about decommissioning kafka broker check the [Kafka documentation](https://www.confluent.io/blog/remove-kafka-brokers-from-any-cluster-the-easy-way/). #### Retaining PersistentVolumes diff --git a/charts/bitnami/kafka/charts/common/Chart.yaml b/charts/bitnami/kafka/charts/common/Chart.yaml index 2acf0cd40..f86ccd23a 100644 --- a/charts/bitnami/kafka/charts/common/Chart.yaml +++ b/charts/bitnami/kafka/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.18.0 +appVersion: 2.19.0 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.18.0 +version: 2.19.0 diff --git a/charts/bitnami/kafka/charts/common/templates/_compatibility.tpl b/charts/bitnami/kafka/charts/common/templates/_compatibility.tpl index c529f0872..17665d567 100644 --- a/charts/bitnami/kafka/charts/common/templates/_compatibility.tpl +++ b/charts/bitnami/kafka/charts/common/templates/_compatibility.tpl @@ -28,6 +28,10 @@ Usage: {{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}} {{/* Remove incompatible user/group values that do not work in Openshift out of the box */}} {{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}} + {{- if not .secContext.seLinuxOptions -}} + {{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}} + {{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}} + {{- end -}} {{- end -}} {{- end -}} {{- end -}} diff --git a/charts/bitnami/kafka/charts/zookeeper/Chart.lock b/charts/bitnami/kafka/charts/zookeeper/Chart.lock index 7bac6e407..4e03f81f4 100644 --- a/charts/bitnami/kafka/charts/zookeeper/Chart.lock +++ b/charts/bitnami/kafka/charts/zookeeper/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.16.1 -digest: sha256:f808a6fdc9c374d158ad7ff2f2c53a6c409e41da778d768b232dd20f86ef8b47 -generated: "2024-02-21T11:56:37.618424604Z" + version: 2.19.0 +digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc +generated: "2024-03-12T14:54:51.594358116Z" diff --git a/charts/bitnami/kafka/charts/zookeeper/Chart.yaml b/charts/bitnami/kafka/charts/zookeeper/Chart.yaml index 8e55009f6..4be9f0d7e 100644 --- a/charts/bitnami/kafka/charts/zookeeper/Chart.yaml +++ b/charts/bitnami/kafka/charts/zookeeper/Chart.yaml @@ -4,10 +4,10 @@ annotations: - name: os-shell image: docker.io/bitnami/os-shell:12-debian-12-r16 - name: zookeeper - image: docker.io/bitnami/zookeeper:3.9.1-debian-12-r15 + image: docker.io/bitnami/zookeeper:3.9.2-debian-12-r0 licenses: Apache-2.0 apiVersion: v2 -appVersion: 3.9.1 +appVersion: 3.9.2 dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts @@ -26,4 +26,4 @@ maintainers: name: zookeeper sources: - 
https://github.com/bitnami/charts/tree/main/bitnami/zookeeper -version: 12.11.1 +version: 13.0.1 diff --git a/charts/bitnami/kafka/charts/zookeeper/README.md b/charts/bitnami/kafka/charts/zookeeper/README.md index fbc3d2e12..289c5383c 100644 --- a/charts/bitnami/kafka/charts/zookeeper/README.md +++ b/charts/bitnami/kafka/charts/zookeeper/README.md @@ -42,25 +42,105 @@ These commands deploy ZooKeeper on the Kubernetes cluster in the default configu > **Tip**: List all releases using `helm list` -## Uninstalling the Chart +## Configuration and installation details -To uninstall/delete the `my-release` deployment: +### Resource requests and limits -```console -helm delete my-release +Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcePreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### [Rolling vs Immutable tags](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Configure log level + +You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs. + +In order to remove that log noise so levels can be set to 'INFO', two changes must be made. + +First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`. + +Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. 
The following is an example of this use of the Admin Server for probes: + +```yaml +livenessProbe: + enabled: false +readinessProbe: + enabled: false +customLivenessProbe: + exec: + command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok'] + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 +customReadinessProbe: + exec: + command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null'] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 ``` -The command removes all the Kubernetes components associated with the chart and deletes the release. +You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`, set inside conf/log4j.properties as zookeeper.root.logger, which defaults to + +```console +zookeeper.root.logger=INFO, CONSOLE +``` + +The available appenders are: + +- CONSOLE +- ROLLINGFILE +- RFAAUDIT +- TRACEFILE + +## Persistence + +The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). + +### Adjust permissions of persistent volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +### Configure the data log directory + +You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string and it will result in the log being written to the data directory (Zookeeper's default behavior). + +When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information. + +### Set pod affinity + +This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. 
To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. ## Parameters ### Global parameters -| Name | Description | Value | -| ------------------------- | ----------------------------------------------- | ----- | -| `global.imageRegistry` | Global Docker image registry | `""` | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | ### Common parameters @@ -157,7 +237,7 @@ The command removes all the Kubernetes components associated with the chart and | `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | | `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | | `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup | `{}` | -| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `none` | +| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
| `micro` | | `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | | `podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` | | `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | @@ -165,12 +245,12 @@ The command removes all the Kubernetes components associated with the chart and | `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | | `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` | | `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | -| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | | `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` | +| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | | `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | | `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | -| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | | `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | | `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | | `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | @@ -269,10 +349,10 @@ The command removes all the Kubernetes components associated with the chart and | `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | | `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | -| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `none` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `nano` | | `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | | `volumePermissions.containerSecurityContext.enabled` | Enabled init container Security Context | `true` | -| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | | `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | ### Metrics parameters @@ -329,7 +409,7 @@ The command removes all the Kubernetes components associated with the chart and | `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""` | | `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` | | `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` | -| `tls.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if tls.resources is set (tls.resources is recommended for production). | `none` | +| `tls.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if tls.resources is set (tls.resources is recommended for production). | `nano` | | `tls.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, @@ -355,101 +435,23 @@ helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/zooke > Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. > **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper/values.yaml) -## Configuration and installation details - -### Resource requests and limits - -Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. - -To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcePreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - -### [Rolling vs Immutable tags](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers) - -It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. 
- -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. - -### Configure log level - -You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs. - -In order to remove that log noise so levels can be set to 'INFO', two changes must be made. - -First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`. - -Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes: - -```yaml -livenessProbe: - enabled: false -readinessProbe: - enabled: false -customLivenessProbe: - exec: - command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok'] - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 -customReadinessProbe: - exec: - command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null'] - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 -``` - -You can also set the log4j logging level and what log appenders are turned on, by using `ZOO_LOG4J_PROP` set inside of conf/log4j.properties as zookeeper.root.logger by default to - -```console -zookeeper.root.logger=INFO, CONSOLE -``` - -the available appender is - -- CONSOLE -- ROLLINGFILE -- RFAAUDIT -- TRACEFILE - -## Persistence - -The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container. - -Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence. - -If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). - -### Adjust permissions of persistent volume mountpoint - -As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. - -By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. -As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. - -You can enable this initContainer by setting `volumePermissions.enabled` to `true`. 
- -### Configure the data log directory - -You can use a dedicated device for logs (instead of using the data directory) to help avoiding competition between logging and snaphots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string and it will result in the log being written to the data directory (Zookeeper's default behavior). - -When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information. - -### Set pod affinity - -This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). - -As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. - ## Troubleshooting Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). ## Upgrading +### To 13.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + ### To 12.0.0 This new version of the chart includes the new ZooKeeper major version 3.9.x. For more information, please refer to [Zookeeper 3.9.0 Release Notes](https://zookeeper.apache.org/doc/r3.9.0/releasenotes.html) @@ -550,4 +552,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file +limitations under the License. diff --git a/charts/bitnami/kafka/charts/zookeeper/charts/common/Chart.yaml b/charts/bitnami/kafka/charts/zookeeper/charts/common/Chart.yaml index 33799499e..f86ccd23a 100644 --- a/charts/bitnami/kafka/charts/zookeeper/charts/common/Chart.yaml +++ b/charts/bitnami/kafka/charts/zookeeper/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.16.1 +appVersion: 2.19.0 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.16.1 +version: 2.19.0 diff --git a/charts/bitnami/kafka/charts/zookeeper/charts/common/templates/_compatibility.tpl b/charts/bitnami/kafka/charts/zookeeper/charts/common/templates/_compatibility.tpl new file mode 100644 index 000000000..17665d567 --- /dev/null +++ b/charts/bitnami/kafka/charts/zookeeper/charts/common/templates/_compatibility.tpl @@ -0,0 +1,39 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return true if the detected platform is Openshift +Usage: +{{- include "common.compatibility.isOpenshift" . -}} +*/}} +{{- define "common.compatibility.isOpenshift" -}} +{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC +Usage: +{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) -}} +*/}} +{{- define "common.compatibility.renderSecurityContext" -}} +{{- $adaptedContext := .secContext -}} +{{- if .context.Values.global.compatibility -}} + {{- if .context.Values.global.compatibility.openshift -}} + {{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}} + {{/* Remove incompatible user/group values that do not work in Openshift out of the box */}} + {{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}} + {{- if not .secContext.seLinuxOptions -}} + {{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}} + {{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- omit $adaptedContext "enabled" | toYaml -}} +{{- end -}} diff --git a/charts/bitnami/kafka/charts/zookeeper/templates/statefulset.yaml b/charts/bitnami/kafka/charts/zookeeper/templates/statefulset.yaml index 56ac08459..82b2208de 100644 --- a/charts/bitnami/kafka/charts/zookeeper/templates/statefulset.yaml +++ b/charts/bitnami/kafka/charts/zookeeper/templates/statefulset.yaml @@ -74,7 +74,7 @@ spec: schedulerName: {{ .Values.schedulerName }} {{- end }} {{- if .Values.podSecurityContext.enabled }} - securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }} {{- end }} {{- if .Values.dnsPolicy }} dnsPolicy: {{ .Values.dnsPolicy }} @@ -101,7 +101,7 @@ spec: find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{- end }} {{- if .Values.volumePermissions.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" 
.Values.volumePermissions.containerSecurityContext "context" $) | nindent 12 }} {{- end }} {{- if .Values.volumePermissions.resources }} resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} @@ -124,7 +124,7 @@ spec: image: {{ include "zookeeper.image" . }} imagePullPolicy: {{ .Values.image.pullPolicy | quote }} {{- if .Values.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }} {{- end }} command: - /scripts/init-certs.sh @@ -191,7 +191,7 @@ spec: image: {{ template "zookeeper.image" . }} imagePullPolicy: {{ .Values.image.pullPolicy | quote }} {{- if .Values.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }} {{- end }} {{- if .Values.diagnosticMode.enabled }} command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} diff --git a/charts/bitnami/kafka/charts/zookeeper/values.yaml b/charts/bitnami/kafka/charts/zookeeper/values.yaml index 6424f6517..17870cf51 100644 --- a/charts/bitnami/kafka/charts/zookeeper/values.yaml +++ b/charts/bitnami/kafka/charts/zookeeper/values.yaml @@ -19,6 +19,15 @@ global: ## imagePullSecrets: [] storageClass: "" + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto ## @section Common parameters ## @@ -76,7 +85,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/zookeeper - tag: 3.9.1-debian-12-r15 + tag: 3.9.2-debian-12-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -312,7 +321,7 @@ lifecycleHooks: {} ## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## -resourcesPreset: "none" +resourcesPreset: "micro" ## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -353,12 +362,12 @@ podSecurityContext: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 - runAsGroup: 0 + runAsGroup: 1001 runAsNonRoot: true privileged: false - readOnlyRootFilesystem: false + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: ["ALL"] @@ -767,7 +776,7 @@ volumePermissions: ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "nano" ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -788,7 +797,7 @@ volumePermissions: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 0 ## @section Metrics parameters ## @@ -974,7 +983,7 @@ tls: ## @param tls.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if tls.resources is set (tls.resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "nano" ## @param tls.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: diff --git a/charts/bitnami/kafka/templates/broker/svc-external-access.yaml b/charts/bitnami/kafka/templates/broker/svc-external-access.yaml index df286dfe5..74c892661 100644 --- a/charts/bitnami/kafka/templates/broker/svc-external-access.yaml +++ b/charts/bitnami/kafka/templates/broker/svc-external-access.yaml @@ -8,6 +8,7 @@ SPDX-License-Identifier: APACHE-2.0 {{- $replicaCount := .Values.broker.replicaCount | int }} {{- range $i := until $replicaCount }} {{- $targetPod := printf "%s-broker-%d" (printf "%s" $fullname) $i }} +{{- $_ := set $ "targetPod" $targetPod }} apiVersion: v1 kind: Service metadata: diff --git a/charts/bitnami/kafka/templates/controller-eligible/svc-external-access.yaml b/charts/bitnami/kafka/templates/controller-eligible/svc-external-access.yaml index 4bdb65a62..43d6d7a9e 100644 --- a/charts/bitnami/kafka/templates/controller-eligible/svc-external-access.yaml +++ b/charts/bitnami/kafka/templates/controller-eligible/svc-external-access.yaml @@ -9,6 +9,7 @@ SPDX-License-Identifier: APACHE-2.0 {{- $replicaCount := .Values.controller.replicaCount | int }} {{- range $i := until $replicaCount }} {{- $targetPod := printf "%s-controller-%d" $fullname $i }} +{{- $_ := set $ "targetPod" $targetPod }} apiVersion: v1 kind: Service metadata: diff --git a/charts/bitnami/kafka/templates/network-policy/networkpolicy-egress.yaml b/charts/bitnami/kafka/templates/network-policy/networkpolicy-egress.yaml deleted file mode 100644 
index 47b8ec1d0..000000000 --- a/charts/bitnami/kafka/templates/network-policy/networkpolicy-egress.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }} -kind: NetworkPolicy -apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} -metadata: - name: {{ printf "%s-egress" (include "common.names.fullname" .) }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - podSelector: - matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} - policyTypes: - - Egress - egress: - {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} -{{- end }} diff --git a/charts/bitnami/kafka/templates/network-policy/networkpolicy-ingress.yaml b/charts/bitnami/kafka/templates/network-policy/networkpolicy-ingress.yaml deleted file mode 100644 index 47314bfcc..000000000 --- a/charts/bitnami/kafka/templates/network-policy/networkpolicy-ingress.yaml +++ /dev/null @@ -1,53 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.networkPolicy.enabled }} -kind: NetworkPolicy -apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} -metadata: - name: {{ printf "%s-ingress" (include "common.names.fullname" .) }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - podSelector: - matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} - policyTypes: - - Ingress - ingress: - # Allow client connections - - ports: - - port: {{ .Values.listeners.client.containerPort }} - {{- if not .Values.networkPolicy.allowExternal }} - from: - - podSelector: - matchLabels: - {{ template "common.names.fullname" . 
}}-client: "true" - {{- if .Values.networkPolicy.explicitNamespacesSelector }} - namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }} - {{- end }} - {{- end }} - # Allow communication inter-broker - - ports: - - port: {{ .Values.listeners.interbroker.containerPort }} - from: - - podSelector: - matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} - # Allow External connection - {{- if .Values.externalAccess.enabled }} - - ports: - - port: {{ .Values.listeners.external.containerPort }} - {{- if .Values.networkPolicy.externalAccess.from }} - from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }} - {{- end }} - {{- end }} - {{- if .Values.metrics.kafka.enabled }} - # Allow prometheus scrapes - - ports: - - port: {{ .Values.metrics.kafka.containerPorts.metrics }} - {{- end }} -{{- end }} diff --git a/charts/bitnami/kafka/templates/networkpolicy.yaml b/charts/bitnami/kafka/templates/networkpolicy.yaml new file mode 100644 index 000000000..5b2e33db5 --- /dev/null +++ b/charts/bitnami/kafka/templates/networkpolicy.yaml @@ -0,0 +1,86 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow internal communications between nodes + - ports: + - port: {{ .Values.listeners.client.containerPort }} + - port: {{ .Values.listeners.interbroker.containerPort }} + {{- if .Values.externalAccess.enabled }} + - port: {{ .Values.listeners.external.containerPort }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow client connections + - ports: + - port: {{ .Values.listeners.client.containerPort }} + - port: {{ .Values.listeners.interbroker.containerPort }} + {{- if .Values.externalAccess.enabled }} + - port: {{ .Values.listeners.external.containerPort }} + {{- end }} + {{- if .Values.metrics.kafka.enabled }} + - port: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/bitnami/kafka/values.yaml b/charts/bitnami/kafka/values.yaml index fe7081cc6..cc0b43a00 100644 --- a/charts/bitnami/kafka/values.yaml +++ b/charts/bitnami/kafka/values.yaml @@ -27,7 +27,7 @@ global: openshift: ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) ## - adaptSecurityContext: disabled + adaptSecurityContext: auto ## @section Common parameters ## @@ -599,7 +599,7 @@ controller: ## @param controller.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if controller.resources is set (controller.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "small" ## @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -632,7 +632,9 @@ controller: ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container ## @param controller.containerSecurityContext.enabled Enable Kafka containers' Security Context ## @param controller.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param controller.containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser + ## @param controller.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param controller.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param controller.containerSecurityContext.runAsGroup Set Kafka containers' Security Context runAsGroup ## @param controller.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot ## @param controller.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged ## @param controller.containerSecurityContext.readOnlyRootFilesystem Allows the pod to mount the RootFS as ReadOnly only @@ -646,8 +648,9 @@ controller: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -1007,7 +1010,7 @@ broker: ## @param broker.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if broker.resources is set (broker.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "small" ## @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -1040,7 +1043,8 @@ broker: ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container ## @param broker.containerSecurityContext.enabled Enable Kafka containers' Security Context ## @param broker.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param broker.containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser + ## @param broker.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param broker.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup ## @param broker.containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot ## @param broker.containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged ## @param broker.containerSecurityContext.readOnlyRootFilesystem Allows the pod to mount the RootFS as ReadOnly only @@ -1054,8 +1058,9 @@ broker: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -1413,7 +1418,7 @@ externalAccess: ## @param externalAccess.autoDiscovery.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if externalAccess.autoDiscovery.resources is set (externalAccess.autoDiscovery.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "nano" ## @param externalAccess.autoDiscovery.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -1429,7 +1434,8 @@ externalAccess: ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container ## @param externalAccess.autoDiscovery.containerSecurityContext.enabled Enable Kafka auto-discovery containers' Security Context ## @param externalAccess.autoDiscovery.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param externalAccess.autoDiscovery.containerSecurityContext.runAsUser Set Kafka auto-discovery containers' Security Context runAsUser + ## @param externalAccess.autoDiscovery.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param externalAccess.autoDiscovery.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup ## @param externalAccess.autoDiscovery.containerSecurityContext.runAsNonRoot Set Kafka auto-discovery containers' Security Context runAsNonRoot ## @param externalAccess.autoDiscovery.containerSecurityContext.allowPrivilegeEscalation Set Kafka auto-discovery containers' Security Context allowPrivilegeEscalation ## @param externalAccess.autoDiscovery.containerSecurityContext.readOnlyRootFilesystem Set Kafka auto-discovery containers' Security Context readOnlyRootFilesystem @@ -1444,8 +1450,9 @@ externalAccess: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -1627,47 +1634,53 @@ externalAccess: networkPolicy: ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created ## - enabled: false + enabled: true ## @param networkPolicy.allowExternal Don't require client label for connections - ## When set to false, only pods with the correct client label will have network access to the port Kafka is + ## When set to false, only pods with the correct client label will have network access to the port Redis® is ## listening on. When true, zookeeper accept connections from any source (with the correct destination port). ## allowExternal: true - ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed - ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the kafka. - ## But sometimes, we want the kafka to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
## + allowExternalEgress: true + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolice ## e.g: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} - ## - explicitNamespacesSelector: {} - ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy ## e.g: - ## - ipBlock: - ## cidr: 172.9.0.0/16 - ## except: - ## - 172.9.1.0/24 + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend ## - externalAccess: - from: [] - ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces ## - egressRules: - ## Additional custom egress rules - ## e.g: - ## customRules: - ## - to: - ## - namespaceSelector: - ## matchLabels: - ## label: example - ## - customRules: [] + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} ## @section Volume Permissions parameters ## @@ -1704,7 +1717,7 @@ volumePermissions: ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "nano" ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -1723,7 +1736,7 @@ volumePermissions: ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container ## containerSecurityContext: - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 0 ## @section Other Parameters ## @@ -1885,7 +1898,7 @@ metrics: ## @param metrics.kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.kafka.resources is set (metrics.kafka.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "micro" ## @param metrics.kafka.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -1918,7 +1931,8 @@ metrics: ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context ## @param metrics.kafka.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser + ## @param metrics.kafka.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.kafka.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup ## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot ## @param metrics.kafka.containerSecurityContext.allowPrivilegeEscalation Set Kafka exporter containers' Security Context allowPrivilegeEscalation ## @param metrics.kafka.containerSecurityContext.readOnlyRootFilesystem Set Kafka exporter containers' Security Context readOnlyRootFilesystem @@ -1932,8 +1946,9 @@ metrics: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -2124,7 +2139,8 @@ metrics: ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context ## @param metrics.jmx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot ## @param metrics.jmx.containerSecurityContext.allowPrivilegeEscalation Set Prometheus JMX exporter containers' Security Context allowPrivilegeEscalation ## @param metrics.jmx.containerSecurityContext.readOnlyRootFilesystem Set Prometheus JMX exporter containers' Security Context readOnlyRootFilesystem @@ -2138,8 +2154,9 @@ metrics: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -2154,7 +2171,7 @@ metrics: ## @param metrics.jmx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). 
## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "micro" ## @param metrics.jmx.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -2439,7 +2456,7 @@ provisioning: ## @param provisioning.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if provisioning.resources is set (provisioning.resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" + resourcesPreset: "micro" ## @param provisioning.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -2472,7 +2489,8 @@ provisioning: ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context ## @param provisioning.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot ## @param provisioning.containerSecurityContext.allowPrivilegeEscalation Set Kafka provisioning containers' Security Context allowPrivilegeEscalation ## @param provisioning.containerSecurityContext.readOnlyRootFilesystem Set Kafka provisioning containers' Security Context readOnlyRootFilesystem @@ -2486,8 +2504,9 @@ provisioning: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true allowPrivilegeEscalation: false readOnlyRootFilesystem: true diff --git a/charts/bitnami/mariadb/Chart.lock b/charts/bitnami/mariadb/Chart.lock index 220f0e4b2..0d8862e38 100644 --- a/charts/bitnami/mariadb/Chart.lock +++ b/charts/bitnami/mariadb/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.19.0 -digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc -generated: "2024-03-08T11:25:32.224991562+01:00" + version: 2.19.1 +digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5 +generated: "2024-04-02T11:21:12.855408532Z" diff --git a/charts/bitnami/mariadb/Chart.yaml b/charts/bitnami/mariadb/Chart.yaml index 7abe74bc9..84d72ff1d 100644 --- a/charts/bitnami/mariadb/Chart.yaml +++ b/charts/bitnami/mariadb/Chart.yaml @@ -6,14 +6,14 @@ annotations: category: Database images: | - name: mariadb - image: docker.io/bitnami/mariadb:11.2.3-debian-12-r4 + image: docker.io/bitnami/mariadb:11.3.2-debian-12-r1 - name: mysqld-exporter - image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r8 + image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r10 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r16 + image: 
docker.io/bitnami/os-shell:12-debian-12-r18 licenses: Apache-2.0 apiVersion: v2 -appVersion: 11.2.3 +appVersion: 11.3.2 dependencies: - name: common repository: file://./charts/common @@ -37,4 +37,4 @@ maintainers: name: mariadb sources: - https://github.com/bitnami/charts/tree/main/bitnami/mariadb -version: 17.0.1 +version: 18.0.1 diff --git a/charts/bitnami/mariadb/charts/common/Chart.yaml b/charts/bitnami/mariadb/charts/common/Chart.yaml index f86ccd23a..8d0e54694 100644 --- a/charts/bitnami/mariadb/charts/common/Chart.yaml +++ b/charts/bitnami/mariadb/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.19.0 +appVersion: 2.19.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.19.0 +version: 2.19.1 diff --git a/charts/bitnami/mariadb/charts/common/templates/_resources.tpl b/charts/bitnami/mariadb/charts/common/templates/_resources.tpl index d90f8752d..030fa1a99 100644 --- a/charts/bitnami/mariadb/charts/common/templates/_resources.tpl +++ b/charts/bitnami/mariadb/charts/common/templates/_resources.tpl @@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production {{ include "common.resources.preset" (dict "type" "nano") -}} */}} {{- define "common.resources.preset" -}} -{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") @@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") ) "xlarge" (dict - "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") ) "2xlarge" (dict - "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") ) }} @@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production {{- else -}} {{- printf "ERROR: Preset key '%s' invalid. 
Allowed values are %s" .type (join "," (keys $presets)) | fail -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/bitnami/mariadb/values.yaml b/charts/bitnami/mariadb/values.yaml index 64d1a00e0..20ece8255 100644 --- a/charts/bitnami/mariadb/values.yaml +++ b/charts/bitnami/mariadb/values.yaml @@ -95,7 +95,7 @@ serviceBindings: image: registry: docker.io repository: bitnami/mariadb - tag: 11.2.3-debian-12-r4 + tag: 11.3.2-debian-12-r1 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1053,7 +1053,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r16 + tag: 12-debian-12-r18 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) @@ -1097,7 +1097,7 @@ metrics: image: registry: docker.io repository: bitnami/mysqld-exporter - tag: 0.15.1-debian-12-r8 + tag: 0.15.1-debian-12-r10 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) @@ -1321,8 +1321,8 @@ networkPolicy: ## enabled: true ## @param networkPolicy.allowExternal The Policy model to apply - ## When set to false, only pods with the correct client label will have network access to the ports Keycloak is - ## listening on. When true, Keycloak will accept connections from any source (with the correct destination port). + ## When set to false, only pods with the correct client label will have network access to the ports MariaDB is + ## listening on. When true, MariaDB will accept connections from any source (with the correct destination port). ## allowExternal: true ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
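The preset changes above (the Kafka chart's components moving from `resourcesPreset: "none"` to `small`/`nano`/`micro`, and `common` 2.19.1 lowering the `xlarge`/`2xlarge` requests) all carry the same caveat from the chart comments: presets are meant for basic testing, and explicit `resources` are recommended for production and take precedence over the preset. As a minimal sketch of that recommendation for the MariaDB chart updated above — assuming its usual `primary.*` values layout, with request/limit figures that are purely illustrative rather than taken from the chart — an override file might look like:

```yaml
# values-production.yaml (sketch): pin explicit resources instead of a preset.
# Per the chart comments, resourcesPreset is ignored once resources is set;
# the figures below are illustrative assumptions, not chart defaults.
primary:
  resourcesPreset: "none"
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
      ephemeral-storage: 50Mi
    limits:
      cpu: "1"
      memory: 2Gi
      ephemeral-storage: 1Gi
```

The same pattern applies per component in the Kafka chart (for example `controller.resources` or `metrics.jmx.resources`), since each `resourcesPreset` parameter above documents that it is ignored when the matching `resources` block is set.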
diff --git a/charts/bitnami/mysql/Chart.lock b/charts/bitnami/mysql/Chart.lock index bf1b66b17..5012cf583 100644 --- a/charts/bitnami/mysql/Chart.lock +++ b/charts/bitnami/mysql/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.19.0 -digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc -generated: "2024-03-08T11:23:56.170052821+01:00" + version: 2.19.1 +digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5 +generated: "2024-04-02T10:58:49.170367596Z" diff --git a/charts/bitnami/mysql/Chart.yaml b/charts/bitnami/mysql/Chart.yaml index fd0d9cbfd..80b4a30df 100644 --- a/charts/bitnami/mysql/Chart.yaml +++ b/charts/bitnami/mysql/Chart.yaml @@ -6,11 +6,11 @@ annotations: category: Database images: | - name: mysql - image: docker.io/bitnami/mysql:8.0.36-debian-12-r8 + image: docker.io/bitnami/mysql:8.0.36-debian-12-r10 - name: mysqld-exporter - image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r8 + image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r10 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r16 + image: docker.io/bitnami/os-shell:12-debian-12-r18 licenses: Apache-2.0 apiVersion: v2 appVersion: 8.0.36 @@ -36,4 +36,4 @@ maintainers: name: mysql sources: - https://github.com/bitnami/charts/tree/main/bitnami/mysql -version: 10.1.0 +version: 10.1.1 diff --git a/charts/bitnami/mysql/charts/common/Chart.yaml b/charts/bitnami/mysql/charts/common/Chart.yaml index f86ccd23a..8d0e54694 100644 --- a/charts/bitnami/mysql/charts/common/Chart.yaml +++ b/charts/bitnami/mysql/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.19.0 +appVersion: 2.19.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.19.0 +version: 2.19.1 diff --git a/charts/bitnami/mysql/charts/common/templates/_resources.tpl b/charts/bitnami/mysql/charts/common/templates/_resources.tpl index d90f8752d..030fa1a99 100644 --- a/charts/bitnami/mysql/charts/common/templates/_resources.tpl +++ b/charts/bitnami/mysql/charts/common/templates/_resources.tpl @@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production {{ include "common.resources.preset" (dict "type" "nano") -}} */}} {{- define "common.resources.preset" -}} -{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") @@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") ) "xlarge" (dict - "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") ) "2xlarge" (dict - "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") ) }} @@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production {{- else -}} {{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/bitnami/mysql/values.yaml b/charts/bitnami/mysql/values.yaml index db3cecfff..25627ec52 100644 --- a/charts/bitnami/mysql/values.yaml +++ b/charts/bitnami/mysql/values.yaml @@ -90,7 +90,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/mysql - tag: 8.0.36-debian-12-r8 + tag: 8.0.36-debian-12-r10 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1063,8 +1063,8 @@ networkPolicy: ## enabled: true ## @param networkPolicy.allowExternal The Policy model to apply - ## When set to false, only pods with the correct client label will have network access to the ports Keycloak is - ## listening on. When true, Keycloak will accept connections from any source (with the correct destination port). + ## When set to false, only pods with the correct client label will have network access to the ports MySQL is + ## listening on. When true, MySQL will accept connections from any source (with the correct destination port). ## allowExternal: true ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. @@ -1129,7 +1129,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r16 + tag: 12-debian-12-r18 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -1174,7 +1174,7 @@ metrics: image: registry: docker.io repository: bitnami/mysqld-exporter - tag: 0.15.1-debian-12-r8 + tag: 0.15.1-debian-12-r10 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
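The MariaDB and MySQL hunks above only reword the copy-pasted Keycloak text in the `networkPolicy.allowExternal` comment; the policy itself still defaults to `enabled: true` with `allowExternal: true`. As a hedged sketch of tightening that default — assuming the common Bitnami convention that, with `allowExternal: false`, the rendered NetworkPolicy only admits pods carrying a `<fullname>-client: "true"` label (worth confirming with `helm template` for this chart version) — an override could be:

```yaml
# Sketch of a hardened network policy for the MySQL chart bumped above.
# allowExternal: false is assumed to restrict ingress to labelled client pods;
# the exact client label is chart-generated, so verify the rendered policy.
networkPolicy:
  enabled: true
  allowExternal: false
  allowExternalEgress: true   # leave egress open; see the allowExternalEgress parameter added above
```

A client pod in the same namespace would then need the chart's client label (for a release named `my-release`, typically `my-release-mysql-client: "true"`, though that exact name is an assumption here) before it can reach the MySQL service port.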
diff --git a/charts/bitnami/postgresql/Chart.lock b/charts/bitnami/postgresql/Chart.lock index 5320fb8e1..c4a252316 100644 --- a/charts/bitnami/postgresql/Chart.lock +++ b/charts/bitnami/postgresql/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.19.0 -digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc -generated: "2024-03-11T20:27:44.112846437Z" + version: 2.19.1 +digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5 +generated: "2024-04-02T18:43:43.860368523Z" diff --git a/charts/bitnami/postgresql/Chart.yaml b/charts/bitnami/postgresql/Chart.yaml index 73bfc2d5f..fa18d4f6c 100644 --- a/charts/bitnami/postgresql/Chart.yaml +++ b/charts/bitnami/postgresql/Chart.yaml @@ -6,11 +6,11 @@ annotations: category: Database images: | - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r17 + image: docker.io/bitnami/os-shell:12-debian-12-r18 - name: postgres-exporter - image: docker.io/bitnami/postgres-exporter:0.15.0-debian-12-r14 + image: docker.io/bitnami/postgres-exporter:0.15.0-debian-12-r15 - name: postgresql - image: docker.io/bitnami/postgresql:16.2.0-debian-12-r10 + image: docker.io/bitnami/postgresql:16.2.0-debian-12-r12 licenses: Apache-2.0 apiVersion: v2 appVersion: 16.2.0 @@ -38,4 +38,4 @@ maintainers: name: postgresql sources: - https://github.com/bitnami/charts/tree/main/bitnami/postgresql -version: 15.1.4 +version: 15.2.2 diff --git a/charts/bitnami/postgresql/README.md b/charts/bitnami/postgresql/README.md index 1490b5e96..658cad6f0 100644 --- a/charts/bitnami/postgresql/README.md +++ b/charts/bitnami/postgresql/README.md @@ -478,6 +478,7 @@ If you already have data in it, you will fail to sync to standby nodes for all c | `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | | `primary.service.headless.annotations` | Additional custom annotations for headless PostgreSQL primary service | `{}` | | `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` | +| `primary.persistence.volumeName` | Name to assign the volume | `data` | | `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` | | `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | | `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` | diff --git a/charts/bitnami/postgresql/charts/common/Chart.yaml b/charts/bitnami/postgresql/charts/common/Chart.yaml index f86ccd23a..8d0e54694 100644 --- a/charts/bitnami/postgresql/charts/common/Chart.yaml +++ b/charts/bitnami/postgresql/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.19.0 +appVersion: 2.19.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.19.0 +version: 2.19.1 diff --git a/charts/bitnami/postgresql/charts/common/templates/_resources.tpl b/charts/bitnami/postgresql/charts/common/templates/_resources.tpl index d90f8752d..030fa1a99 100644 --- a/charts/bitnami/postgresql/charts/common/templates/_resources.tpl +++ b/charts/bitnami/postgresql/charts/common/templates/_resources.tpl @@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production {{ include "common.resources.preset" (dict "type" "nano") -}} */}} {{- define "common.resources.preset" -}} -{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") @@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") ) "xlarge" (dict - "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") ) "2xlarge" (dict - "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") ) }} @@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production {{- else -}} {{- printf "ERROR: Preset key '%s' invalid. 
Allowed values are %s" .type (join "," (keys $presets)) | fail -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/bitnami/postgresql/templates/primary/statefulset.yaml b/charts/bitnami/postgresql/templates/primary/statefulset.yaml index c08191bbd..9d306dd84 100644 --- a/charts/bitnami/postgresql/templates/primary/statefulset.yaml +++ b/charts/bitnami/postgresql/templates/primary/statefulset.yaml @@ -162,7 +162,7 @@ spec: - name: empty-dir mountPath: /tmp subPath: tmp-dir - - name: data + - name: {{ .Values.primary.persistence.volumeName }} mountPath: {{ .Values.primary.persistence.mountPath }} {{- if .Values.primary.persistence.subPath }} subPath: {{ .Values.primary.persistence.subPath }} @@ -494,13 +494,11 @@ spec: - name: dshm mountPath: /dev/shm {{- end }} - {{- if .Values.primary.persistence.enabled }} - - name: data + - name: {{ .Values.primary.persistence.volumeName }} mountPath: {{ .Values.primary.persistence.mountPath }} {{- if .Values.primary.persistence.subPath }} subPath: {{ .Values.primary.persistence.subPath }} {{- end }} - {{- end }} {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} - name: postgresql-config mountPath: {{ .Values.primary.persistence.mountPath }}/conf @@ -650,11 +648,11 @@ spec: {{- end }} {{- end }} {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} - - name: data + - name: {{ .Values.primary.persistence.volumeName }} persistentVolumeClaim: claimName: {{ tpl .Values.primary.persistence.existingClaim $ }} {{- else if not .Values.primary.persistence.enabled }} - - name: data + - name: {{ .Values.primary.persistence.volumeName }} emptyDir: {} {{- else }} {{- if .Values.primary.persistentVolumeClaimRetentionPolicy.enabled }} @@ -666,7 +664,7 @@ spec: - apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: data + name: {{ .Values.primary.persistence.volumeName }} {{- if .Values.primary.persistence.annotations }} annotations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }} {{- end }} diff --git a/charts/bitnami/postgresql/templates/read/statefulset.yaml b/charts/bitnami/postgresql/templates/read/statefulset.yaml index 7cfa06bda..494f22e2d 100644 --- a/charts/bitnami/postgresql/templates/read/statefulset.yaml +++ b/charts/bitnami/postgresql/templates/read/statefulset.yaml @@ -413,13 +413,11 @@ spec: - name: dshm mountPath: /dev/shm {{- end }} - {{- if .Values.readReplicas.persistence.enabled }} - name: data mountPath: {{ .Values.readReplicas.persistence.mountPath }} {{- if .Values.readReplicas.persistence.subPath }} subPath: {{ .Values.readReplicas.persistence.subPath }} {{- end }} - {{- end }} {{- if .Values.readReplicas.extraVolumeMounts }} {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }} {{- end }} diff --git a/charts/bitnami/postgresql/values.yaml b/charts/bitnami/postgresql/values.yaml index 917a32b23..70da02fd1 100644 --- a/charts/bitnami/postgresql/values.yaml +++ b/charts/bitnami/postgresql/values.yaml @@ -105,7 +105,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/postgresql - tag: 16.2.0-debian-12-r10 + tag: 16.2.0-debian-12-r12 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -738,6 +738,9 @@ primary: ## @param primary.persistence.enabled Enable PostgreSQL Primary data 
persistence using PVC ## enabled: true + ## @param primary.persistence.volumeName Name to assign the volume + ## + volumeName: "data" ## @param primary.persistence.existingClaim Name of an existing PVC to use ## existingClaim: "" @@ -1392,7 +1395,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r17 + tag: 12-debian-12-r18 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -1501,7 +1504,7 @@ metrics: image: registry: docker.io repository: bitnami/postgres-exporter - tag: 0.15.0-debian-12-r14 + tag: 0.15.0-debian-12-r15 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. diff --git a/charts/bitnami/redis/Chart.yaml b/charts/bitnami/redis/Chart.yaml index a61277d71..d506e2ad8 100644 --- a/charts/bitnami/redis/Chart.yaml +++ b/charts/bitnami/redis/Chart.yaml @@ -39,4 +39,4 @@ maintainers: name: redis sources: - https://github.com/bitnami/charts/tree/main/bitnami/redis -version: 19.0.1 +version: 19.0.2 diff --git a/charts/bitnami/redis/README.md b/charts/bitnami/redis/README.md index b958bd87a..7f71fce90 100644 --- a/charts/bitnami/redis/README.md +++ b/charts/bitnami/redis/README.md @@ -973,6 +973,15 @@ helm install my-release --set master.persistence.existingClaim=PVC_NAME oci://RE | `kubectl.image.pullPolicy` | Kubectl image pull policy | `IfNotPresent` | | `kubectl.image.pullSecrets` | Kubectl pull secrets | `[]` | | `kubectl.command` | kubectl command to execute | `["/opt/bitnami/scripts/kubectl-scripts/update-master-label.sh"]` | +| `kubectl.containerSecurityContext.enabled` | Enabled kubectl containers' Security Context | `true` | +| `kubectl.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `kubectl.containerSecurityContext.runAsUser` | Set kubectl containers' Security Context runAsUser | `1001` | +| `kubectl.containerSecurityContext.runAsGroup` | Set kubectl containers' Security Context runAsGroup | `1001` | +| `kubectl.containerSecurityContext.runAsNonRoot` | Set kubectl containers' Security Context runAsNonRoot | `true` | +| `kubectl.containerSecurityContext.allowPrivilegeEscalation` | Set kubectl containers' Security Context allowPrivilegeEscalation | `false` | +| `kubectl.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context read-only root filesystem | `true` | +| `kubectl.containerSecurityContext.seccompProfile.type` | Set kubectl containers' Security Context seccompProfile | `RuntimeDefault` | +| `kubectl.containerSecurityContext.capabilities.drop` | Set kubectl containers' Security Context capabilities to drop | `["ALL"]` | | `kubectl.resources.limits` | The resources limits for the kubectl containers | `{}` | | `kubectl.resources.requests` | The requested resources for the kubectl containers | `{}` | | `sysctl.enabled` | Enable init container to modify Kernel settings | `false` | diff --git a/charts/bitnami/redis/templates/sentinel/statefulset.yaml b/charts/bitnami/redis/templates/sentinel/statefulset.yaml index dfb1352bf..563d40e71 100644 --- a/charts/bitnami/redis/templates/sentinel/statefulset.yaml +++ b/charts/bitnami/redis/templates/sentinel/statefulset.yaml @@ -598,8 +598,9 @@ spec: image: {{ template "redis.kubectl.image" . 
}} imagePullPolicy: {{ .Values.kubectl.image.pullPolicy | quote }} command: {{- toYaml .Values.kubectl.command | nindent 12 }} - securityContext: - runAsUser: 0 + {{- if .Values.kubectl.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.kubectl.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} volumeMounts: - name: kubectl-shared mountPath: /etc/shared diff --git a/charts/bitnami/redis/values.yaml b/charts/bitnami/redis/values.yaml index 142da1fbf..c22abd7d9 100644 --- a/charts/bitnami/redis/values.yaml +++ b/charts/bitnami/redis/values.yaml @@ -2052,6 +2052,30 @@ kubectl: ## @param kubectl.command kubectl command to execute ## command: ["/opt/bitnami/scripts/kubectl-scripts/update-master-label.sh"] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param kubectl.containerSecurityContext.enabled Enabled kubectl containers' Security Context + ## @param kubectl.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param kubectl.containerSecurityContext.runAsUser Set kubectl containers' Security Context runAsUser + ## @param kubectl.containerSecurityContext.runAsGroup Set kubectl containers' Security Context runAsGroup + ## @param kubectl.containerSecurityContext.runAsNonRoot Set kubectl containers' Security Context runAsNonRoot + ## @param kubectl.containerSecurityContext.allowPrivilegeEscalation Set kubectl containers' Security Context allowPrivilegeEscalation + ## @param kubectl.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context read-only root filesystem + ## @param kubectl.containerSecurityContext.seccompProfile.type Set kubectl containers' Security Context seccompProfile + ## @param kubectl.containerSecurityContext.capabilities.drop Set kubectl containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + capabilities: + drop: ["ALL"] ## Bitnami Kubectl resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ ## @param kubectl.resources.limits The resources limits for the kubectl containers diff --git a/charts/bitnami/tomcat/Chart.lock b/charts/bitnami/tomcat/Chart.lock index eaff6d58f..f049db3b2 100644 --- a/charts/bitnami/tomcat/Chart.lock +++ b/charts/bitnami/tomcat/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.19.0 -digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc -generated: "2024-03-15T01:41:57.021991384Z" + version: 2.19.1 +digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5 +generated: "2024-03-27T14:58:35.744336265+01:00" diff --git a/charts/bitnami/tomcat/Chart.yaml b/charts/bitnami/tomcat/Chart.yaml index 1e2b8b471..fd01e2c6a 100644 --- a/charts/bitnami/tomcat/Chart.yaml +++ b/charts/bitnami/tomcat/Chart.yaml @@ -6,14 +6,14 @@ annotations: category: ApplicationServer images: | - name: jmx-exporter - image: docker.io/bitnami/jmx-exporter:0.20.0-debian-12-r11 + image: docker.io/bitnami/jmx-exporter:0.20.0-debian-12-r12 - name: os-shell - image: 
docker.io/bitnami/os-shell:12-debian-12-r16 + image: docker.io/bitnami/os-shell:12-debian-12-r17 - name: tomcat - image: docker.io/bitnami/tomcat:10.1.19-debian-12-r2 + image: docker.io/bitnami/tomcat:10.1.20-debian-12-r0 licenses: Apache-2.0 apiVersion: v2 -appVersion: 10.1.19 +appVersion: 10.1.20 dependencies: - name: common repository: file://./charts/common @@ -38,4 +38,4 @@ maintainers: name: tomcat sources: - https://github.com/bitnami/charts/tree/main/bitnami/tomcat -version: 10.17.1 +version: 11.0.0 diff --git a/charts/bitnami/tomcat/README.md b/charts/bitnami/tomcat/README.md index 37a7580ad..07c02695b 100644 --- a/charts/bitnami/tomcat/README.md +++ b/charts/bitnami/tomcat/README.md @@ -45,250 +45,6 @@ These commands deploy Tomcat on the Kubernetes cluster in the default configurat > **Tip**: List all releases using `helm list` -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Parameters - -### Global parameters - -| Name | Description | Value | -| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -| `global.imageRegistry` | Global Docker image registry | `""` | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | -| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` | - -### Common parameters - -| Name | Description | Value | -| ------------------- | -------------------------------------------------------------------------------------------- | --------------- | -| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | -| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | -| `fullnameOverride` | String to fully override common.names.fullname template | `""` | -| `commonLabels` | Add labels to all the deployed resources | `{}` | -| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | -| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | -| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | - -### Tomcat parameters - -| Name | Description | Value | -| ------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | -| `image.registry` | Tomcat image registry | `REGISTRY_NAME` | -| `image.repository` | Tomcat image repository | `REPOSITORY_NAME/tomcat` | -| `image.digest` | Tomcat image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | Tomcat image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `image.debug` | Specify if debug logs should be enabled | `false` | -| `automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `hostAliases` | Deployment pod host aliases | `[]` | -| `tomcatUsername` | Tomcat admin user | `user` | -| `tomcatPassword` | Tomcat admin password | `""` | -| `existingSecret` | Use existing secret for password details (`tomcatPassword` will be ignored and picked up from this secret). The secret has to contain the key `tomcat-password` | `""` | -| `tomcatAllowRemoteManagement` | Enable remote access to management interface | `0` | -| `catalinaOpts` | Java runtime option used by tomcat JVM | `""` | -| `command` | Override default container command (useful when using custom images) | `[]` | -| `args` | Override default container args (useful when using custom images) | `[]` | -| `extraEnvVars` | Extra environment variables to be set on Tomcat container | `[]` | -| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | -| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables | `""` | - -### Tomcat deployment parameters - -| Name | Description | Value | -| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | -| `replicaCount` | Specify number of Tomcat replicas | `1` | -| `deployment.type` | Use Deployment or StatefulSet | `deployment` | -| `updateStrategy.type` | StrategyType | `RollingUpdate` | -| `containerPorts.http` | HTTP port to expose at container level | `8080` | -| `containerExtraPorts` | Extra ports to expose at container level | `[]` | -| `podSecurityContext.enabled` | Enable Tomcat pods' Security Context | `true` | -| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `podSecurityContext.fsGroup` | Set Tomcat pod's Security Context fsGroup | `1001` | -| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | -| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | -| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | -| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | -| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | -| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | -| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | -| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, 
xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `none` | -| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `livenessProbe.enabled` | Enable livenessProbe | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `readinessProbe.enabled` | Enable readinessProbe | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | -| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `startupProbe.enabled` | Enable startupProbe | `false` | -| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | -| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `3` | -| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | -| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `customLivenessProbe` | Override default liveness probe | `{}` | -| `customReadinessProbe` | Override default readiness probe | `{}` | -| `customStartupProbe` | Override default startup probe | `{}` | -| `podLabels` | Extra labels for Tomcat pods | `{}` | -| `podAnnotations` | Annotations for Tomcat pods | `{}` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | -| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` | -| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` | -| `schedulerName` | Alternative scheduler | `""` | -| `lifecycleHooks` | Override default etcd container hooks | `{}` | -| `podManagementPolicy` | podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode) | `""` | -| `tolerations` | Tolerations for pod assignment. Evaluated as a template. | `[]` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | -| `extraPodSpec` | Optionally specify extra PodSpec | `{}` | -| `extraVolumes` | Optionally specify extra list of additional volumes for Tomcat pods in Deployment | `[]` | -| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volume claim templates for Tomcat pods in StatefulSet | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Tomcat container(s) | `[]` | -| `initContainers` | Add init containers to the Tomcat pods. | `[]` | -| `sidecars` | Add sidecars to the Tomcat pods. | `[]` | -| `persistence.enabled` | Enable persistence | `true` | -| `persistence.storageClass` | PVC Storage Class for Tomcat volume | `""` | -| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | -| `persistence.accessModes` | PVC Access Modes for Tomcat volume | `["ReadWriteOnce"]` | -| `persistence.size` | PVC Storage Request for Tomcat volume | `8Gi` | -| `persistence.existingClaim` | An Existing PVC name for Tomcat volume | `""` | -| `persistence.selectorLabels` | Selector labels to use in volume claim template in statefulset | `{}` | -| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | -| `serviceAccount.create` | Enable creation of ServiceAccount for Tomcat pod | `true` | -| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | -| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | -| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | - -### Traffic Exposure parameters - -| Name | Description | Value | -| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | -| `service.type` | Kubernetes Service type | `LoadBalancer` | -| `service.ports.http` | Service HTTP port | `80` | -| `service.nodePorts.http` | Kubernetes http node port | `""` | -| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | -| `service.loadBalancerIP` | Port Use serviceLoadBalancerIP to request a specific static IP, otherwise leave blank | `""` | -| `service.clusterIP` | Service Cluster IP | `""` | -| `service.loadBalancerSourceRanges` | Service Load Balancer sources | `[]` | -| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | -| `service.annotations` | Annotations for Tomcat service | `{}` | -| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | -| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `service.headless.annotations` | Annotations for the headless service. | `{}` | -| `ingress.enabled` | Enable ingress controller resource | `false` | -| `ingress.hostname` | Default host for the ingress resource | `tomcat.local` | -| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | -| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | -| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | -| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | -| `ingress.extraPaths` | Any additional arbitrary paths that may need to be added to the ingress under the main host. | `[]` | -| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | -| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | -| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | -| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | -| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | -| `ingress.path` | Ingress path | `/` | -| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | - -### Volume Permissions parameters - -| Name | Description | Value | -| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | -| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | -| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `none` | -| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | - -### Metrics parameters - -| Name | Description | Value | -| --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | -| `metrics.jmx.catalinaOpts` | custom option used to enabled JMX on tomcat jvm evaluated as template | `-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true` | -| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | -| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | -| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | -| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | -| `metrics.jmx.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | -| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | -| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | -| `metrics.jmx.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | -| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | -| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | -| `metrics.jmx.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | -| `metrics.jmx.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | -| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). 
| `none` | -| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `metrics.jmx.ports.metrics` | JMX Exporter container metrics ports | `5556` | -| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | -| `metrics.podMonitor.podTargetLabels` | Used to keep given pod's labels in target | `[]` | -| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` | -| `metrics.podMonitor.namespace` | Optional namespace in which Prometheus is running | `""` | -| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | -| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` | -| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | -| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` | -| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` | -| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` | -| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | -| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | -| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` | -| `metrics.prometheusRule.rules` | Create specified [Rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) | `[]` | - -The above parameters map to the env variables defined in [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat). For more information please refer to the [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat) image documentation. - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, - -```console -helm install my-release \ - --set tomcatUsername=manager,tomcatPassword=password oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat -``` - -> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. - -The above command sets the Tomcat management username and password to `manager` and `password` respectively. - -> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. - -Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, - -```console -helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat -``` - -> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. 
-> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/tomcat/values.yaml) - ## Configuration and installation details ### Resource requests and limits @@ -382,12 +138,264 @@ As an alternative, this chart supports using an init container to change the own You can enable this init container by setting `volumePermissions.enabled` to `true`. +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` | + +### Common parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | + +### Tomcat parameters + +| Name | Description | Value | +| ------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `image.registry` | Tomcat image registry | `REGISTRY_NAME` | +| `image.repository` | Tomcat image repository | `REPOSITORY_NAME/tomcat` | +| `image.digest` | Tomcat image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Tomcat image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `tomcatUsername` | Tomcat admin user | `user` | +| `tomcatPassword` | Tomcat admin password | `""` | +| `existingSecret` | Use existing secret for password details (`tomcatPassword` will be ignored and picked up from this secret). 
The secret has to contain the key `tomcat-password` | `""` | +| `tomcatAllowRemoteManagement` | Enable remote access to management interface | `0` | +| `catalinaOpts` | Java runtime option used by tomcat JVM | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `extraEnvVars` | Extra environment variables to be set on Tomcat container | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables | `""` | + +### Tomcat deployment parameters + +| Name | Description | Value | +| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `replicaCount` | Specify number of Tomcat replicas | `1` | +| `deployment.type` | Use Deployment or StatefulSet | `deployment` | +| `updateStrategy.type` | StrategyType | `RollingUpdate` | +| `containerPorts.http` | HTTP port to expose at container level | `8080` | +| `containerExtraPorts` | Extra ports to expose at container level | `[]` | +| `podSecurityContext.enabled` | Enable Tomcat pods' Security Context | `true` | +| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `podSecurityContext.fsGroup` | Set Tomcat pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
| `micro` | +| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `3` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `podLabels` | Extra labels for Tomcat pods | `{}` | +| `podAnnotations` | Annotations for Tomcat pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` | +| `schedulerName` | Alternative scheduler | `""` | +| `lifecycleHooks` | Override default etcd container hooks | `{}` | +| `podManagementPolicy` | podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode) | `""` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template. | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `extraPodSpec` | Optionally specify extra PodSpec | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for Tomcat pods in Deployment | `[]` | +| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volume claim templates for Tomcat pods in StatefulSet | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Tomcat container(s) | `[]` | +| `initContainers` | Add init containers to the Tomcat pods. | `[]` | +| `sidecars` | Add sidecars to the Tomcat pods. | `[]` | +| `persistence.enabled` | Enable persistence | `true` | +| `persistence.storageClass` | PVC Storage Class for Tomcat volume | `""` | +| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `persistence.accessModes` | PVC Access Modes for Tomcat volume | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for Tomcat volume | `8Gi` | +| `persistence.existingClaim` | An Existing PVC name for Tomcat volume | `""` | +| `persistence.selectorLabels` | Selector labels to use in volume claim template in statefulset | `{}` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolice | `[]` | +| `networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `serviceAccount.create` | Enable creation of ServiceAccount for Tomcat pod | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + +### Traffic Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `LoadBalancer` | +| `service.ports.http` | Service HTTP port | `80` | +| `service.nodePorts.http` | Kubernetes http node port | `""` | +| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `service.loadBalancerIP` | Port Use serviceLoadBalancerIP to request a specific static IP, otherwise leave blank | `""` | +| `service.clusterIP` | Service Cluster IP | `""` | +| `service.loadBalancerSourceRanges` | Service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.annotations` | Annotations for Tomcat service | `{}` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.headless.annotations` | Annotations for the headless service. 
| `{}` | +| `ingress.enabled` | Enable ingress controller resource | `false` | +| `ingress.hostname` | Default host for the ingress resource | `tomcat.local` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraPaths` | Any additional arbitrary paths that may need to be added to the ingress under the main host. | `[]` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.path` | Ingress path | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `none` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | + +### Metrics parameters + +| Name | Description | Value | +| --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.catalinaOpts` | custom option used to enabled JMX on tomcat jvm evaluated as template | `-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `metrics.jmx.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.jmx.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.jmx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.jmx.resources is set (metrics.jmx.resources is recommended for production). 
| `none` | +| `metrics.jmx.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.jmx.ports.metrics` | JMX Exporter container metrics ports | `5556` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.podMonitor.podTargetLabels` | Used to keep given pod's labels in target | `[]` | +| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.podMonitor.namespace` | Optional namespace in which Prometheus is running | `""` | +| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` | +| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | +| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` | +| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` | +| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` | +| `metrics.prometheusRule.rules` | Create specified [Rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) | `[]` | + +The above parameters map to the env variables defined in [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat). For more information please refer to the [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set tomcatUsername=manager,tomcatPassword=password oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the Tomcat management username and password to `manager` and `password` respectively. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. 
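+For reference, a minimal `values.yaml` along the lines of the example above could look like the sketch below. The keys come from the parameter tables in this README; the credential, hostname and sizing values are illustrative placeholders, not recommended defaults.
+
+```yaml
+# Tomcat admin credentials (see the "Tomcat parameters" table)
+tomcatUsername: manager
+tomcatPassword: password
+# Expose the application through an Ingress instead of a LoadBalancer Service
+service:
+  type: ClusterIP
+ingress:
+  enabled: true
+  hostname: tomcat.local
+# Explicit persistence size and container resources (recommended over resourcesPreset for production)
+persistence:
+  size: 8Gi
+resources:
+  requests:
+    cpu: 250m
+    memory: 512Mi
+  limits:
+    cpu: 500m
+    memory: 1Gi
+```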
+> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/tomcat/values.yaml) + ## Troubleshooting Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). ## Upgrading +### To 11.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage; for production, set `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. +- The `networkPolicy` section has been normalized amongst all Bitnami charts. Compared to the previous approach, the values section has been simplified (check the Parameters section) and it is now set to `enabled=true` by default. Egress traffic is allowed by default, and ingress traffic is allowed from all pods, but only to the ports set in `containerPorts`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + ### To 10.0.0 Some of the chart values were changed to adapt to the latest Bitnami standards. More specifically: diff --git a/charts/bitnami/tomcat/charts/common/Chart.yaml b/charts/bitnami/tomcat/charts/common/Chart.yaml index f86ccd23a..8d0e54694 100644 --- a/charts/bitnami/tomcat/charts/common/Chart.yaml +++ b/charts/bitnami/tomcat/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.19.0 +appVersion: 2.19.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself.
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.19.0 +version: 2.19.1 diff --git a/charts/bitnami/tomcat/charts/common/templates/_resources.tpl b/charts/bitnami/tomcat/charts/common/templates/_resources.tpl index d90f8752d..030fa1a99 100644 --- a/charts/bitnami/tomcat/charts/common/templates/_resources.tpl +++ b/charts/bitnami/tomcat/charts/common/templates/_resources.tpl @@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production {{ include "common.resources.preset" (dict "type" "nano") -}} */}} {{- define "common.resources.preset" -}} -{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") @@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") ) "xlarge" (dict - "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") ) "2xlarge" (dict - "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") ) }} @@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production {{- else -}} {{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/bitnami/tomcat/templates/_pod.tpl b/charts/bitnami/tomcat/templates/_pod.tpl index 0944cb68b..4d046a030 100644 --- a/charts/bitnami/tomcat/templates/_pod.tpl +++ b/charts/bitnami/tomcat/templates/_pod.tpl @@ -150,6 +150,21 @@ containers: volumeMounts: - name: data mountPath: /bitnami/tomcat + - name: empty-dir + mountPath: /opt/bitnami/tomcat/temp + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/bitnami/tomcat/conf + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/bitnami/tomcat/logs + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/bitnami/tomcat/work + subPath: app-work-dir + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir {{- if .Values.extraVolumeMounts }} {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 6 }} {{- end }} @@ -182,11 +197,16 @@ containers: volumeMounts: - name: jmx-config mountPath: /etc/jmx-tomcat + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir {{- end }} {{- if .Values.sidecars }} {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 2 }} {{- end }} volumes: + - name: empty-dir + emptyDir: {} {{- if (eq .Values.deployment.type "deployment") }} {{- if and .Values.persistence.enabled }} - name: data diff --git a/charts/bitnami/tomcat/templates/networkpolicy.yaml b/charts/bitnami/tomcat/templates/networkpolicy.yaml index efbe385a6..c82c811ec 100644 --- a/charts/bitnami/tomcat/templates/networkpolicy.yaml +++ b/charts/bitnami/tomcat/templates/networkpolicy.yaml @@ -8,34 +8,85 @@ kind: NetworkPolicy apiVersion: {{ include 
"common.capabilities.networkPolicy.apiVersion" . }} metadata: name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} {{- if .Values.commonAnnotations }} annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} - namespace: {{ .Release.Namespace }} spec: {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} podSelector: matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to MariaDB + - ports: + - port: {{ include "wordpress.databasePort" . }} + {{- if .Values.mariadb.enabled }} + to: + - podSelector: + matchLabels: + app.kubernetes.io/name: mariadb + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + {{- if .Values.wordpressConfigureCache }} + # Allow outbound connections to Memcached + - ports: + - port: {{ include "wordpress.cachePort" . }} + {{- if .Values.memcached.enabled }} + to: + - podSelector: + matchLabels: + app.kubernetes.io/name: memcached + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} ingress: - # Allow inbound connections - ports: {{- include "tomcat.ports" . | nindent 8 }} {{- if not .Values.networkPolicy.allowExternal }} from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} - podSelector: matchLabels: {{ template "common.names.fullname" . 
}}-client: "true" - {{- if .Values.networkPolicy.explicitNamespacesSelector }} - namespaceSelector: -{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} {{- end }} - # Allow communication between Tomcat's POD - - podSelector: - matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + {{- end }} {{- end }} {{- if .Values.metrics.jmx.enabled }} # Allow prometheus scrapes - ports: - port: {{ .Values.metrics.jmx.ports.metrics }} {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} {{- end }} diff --git a/charts/bitnami/tomcat/values.yaml b/charts/bitnami/tomcat/values.yaml index 86ef4d0b5..745764630 100644 --- a/charts/bitnami/tomcat/values.yaml +++ b/charts/bitnami/tomcat/values.yaml @@ -68,7 +68,7 @@ extraDeploy: [] image: registry: docker.io repository: bitnami/tomcat - tag: 10.1.19-debian-12-r2 + tag: 10.1.20-debian-12-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -180,6 +180,7 @@ podSecurityContext: ## @param containerSecurityContext.enabled Enabled containers' Security Context ## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container ## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser +## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup ## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot ## @param containerSecurityContext.privileged Set container's Security Context privileged ## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem @@ -189,11 +190,12 @@ podSecurityContext: ## containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true privileged: false - readOnlyRootFilesystem: false + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: ["ALL"] @@ -208,7 +210,7 @@ containerSecurityContext: ## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## -resourcesPreset: "none" +resourcesPreset: "micro" ## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) ## Example: ## resources: @@ -406,31 +408,61 @@ persistence: ## Applicable when deployment.type is statefulset ## selectorLabels: {} +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. 
Only Ingress traffic is filtered for now. + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections + enabled: true + ## @param networkPolicy.allowExternal Don't require server label for connections ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to every tomcat port defined on containerPort and containerExtraPorts. - ## When true, tomcat will accept connections from any source + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source ## (with the correct destination port). ## allowExternal: true - ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed - ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the tomcat. - ## But sometimes, we want the tomcat to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} + allowExternalEgress: true + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolice + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend ## - explicitNamespacesSelector: {} + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} ## Service Account ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ## @@ -636,7 +668,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r16 + tag: 12-debian-12-r17 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
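The `networkPolicy` values introduced in the hunk above replace the former `explicitNamespacesSelector`. As a hedged sketch of how the new namespace selectors are meant to be combined with `allowExternal: false` (the `monitoring` namespace and the label values below are illustrative assumptions, not chart defaults):

```yaml
networkPolicy:
  enabled: true
  # With allowExternal=false, only pods labelled "<fullname>-client: true",
  # pods matching the chart's own labels, or pods selected by the namespace
  # selectors below can reach the Tomcat container ports.
  allowExternal: false
  ingressNSMatchLabels:
    kubernetes.io/metadata.name: monitoring
  ingressNSPodMatchLabels:
    app.kubernetes.io/name: prometheus
```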
@@ -694,7 +726,7 @@ metrics: image: registry: docker.io repository: bitnami/jmx-exporter - tag: 0.20.0-debian-12-r11 + tag: 0.20.0-debian-12-r12 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -723,6 +755,7 @@ metrics: ## @param metrics.jmx.containerSecurityContext.enabled Enabled containers' Security Context ## @param metrics.jmx.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container ## @param metrics.jmx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot ## @param metrics.jmx.containerSecurityContext.privileged Set container's Security Context privileged ## @param metrics.jmx.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem @@ -731,11 +764,12 @@ metrics: ## @param metrics.jmx.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile containerSecurityContext: enabled: true - seLinuxOptions: null + seLinuxOptions: {} runAsUser: 1001 + runAsGroup: 1001 runAsNonRoot: true privileged: false - readOnlyRootFilesystem: false + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: ["ALL"] diff --git a/charts/bitnami/wordpress/Chart.lock b/charts/bitnami/wordpress/Chart.lock index 84ceca8fd..6a861a441 100644 --- a/charts/bitnami/wordpress/Chart.lock +++ b/charts/bitnami/wordpress/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: memcached repository: oci://registry-1.docker.io/bitnamicharts - version: 7.0.2 + version: 7.0.3 - name: mariadb repository: oci://registry-1.docker.io/bitnamicharts - version: 17.0.1 + version: 18.0.0 - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.19.0 -digest: sha256:3b4e997af36fdaa0116bd43fdbe2bb7575f280e86a1ede66430a47115b7f89ba -generated: "2024-03-20T16:16:30.974197+01:00" + version: 2.19.1 +digest: sha256:9c6b4b3220792623bf8c5886c162486d5e1d95d2de3f139d8763dd76076291d7 +generated: "2024-04-02T16:36:02.420521+02:00" diff --git a/charts/bitnami/wordpress/Chart.yaml b/charts/bitnami/wordpress/Chart.yaml index 0ea7ca459..7666de42b 100644 --- a/charts/bitnami/wordpress/Chart.yaml +++ b/charts/bitnami/wordpress/Chart.yaml @@ -6,14 +6,14 @@ annotations: category: CMS images: | - name: apache-exporter - image: docker.io/bitnami/apache-exporter:1.0.7-debian-12-r0 + image: docker.io/bitnami/apache-exporter:1.0.7-debian-12-r1 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r17 + image: docker.io/bitnami/os-shell:12-debian-12-r18 - name: wordpress - image: docker.io/bitnami/wordpress:6.4.3-debian-12-r28 + image: docker.io/bitnami/wordpress:6.5.0-debian-12-r0 licenses: Apache-2.0 apiVersion: v2 -appVersion: 6.4.3 +appVersion: 6.5.0 dependencies: - condition: memcached.enabled name: memcached @@ -22,7 +22,7 @@ dependencies: - condition: mariadb.enabled name: mariadb repository: file://./charts/mariadb - version: 17.x.x + version: 18.x.x - name: common repository: file://./charts/common tags: @@ -47,4 +47,4 @@ maintainers: name: wordpress sources: - https://github.com/bitnami/charts/tree/main/bitnami/wordpress -version: 21.0.6 +version: 22.1.0 diff --git a/charts/bitnami/wordpress/README.md b/charts/bitnami/wordpress/README.md index 57663b043..18dce6661 100644 --- 
a/charts/bitnami/wordpress/README.md +++ b/charts/bitnami/wordpress/README.md @@ -325,72 +325,72 @@ As an alternative, use one of the preset configurations for pod affinity, pod an ### WordPress deployment parameters -| Name | Description | Value | -| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | -| `replicaCount` | Number of WordPress replicas to deploy | `1` | -| `updateStrategy.type` | WordPress deployment strategy type | `RollingUpdate` | -| `schedulerName` | Alternate scheduler | `""` | -| `terminationGracePeriodSeconds` | In seconds, time given to the WordPress pod to terminate gracefully | `""` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `priorityClassName` | Name of the existing priority class to be used by WordPress pods, priority class needs to be created beforehand | `""` | -| `automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `hostAliases` | WordPress pod host aliases | `[]` | -| `extraVolumes` | Optionally specify extra list of additional volumes for WordPress pods | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for WordPress container(s) | `[]` | -| `sidecars` | Add additional sidecar containers to the WordPress pod | `[]` | -| `initContainers` | Add additional init containers to the WordPress pods | `[]` | -| `podLabels` | Extra labels for WordPress pods | `{}` | -| `podAnnotations` | Annotations for WordPress pods | `{}` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | -| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` | -| `affinity` | Affinity for pod assignment | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Tolerations for pod assignment | `[]` | -| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
| `micro` | -| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `containerPorts.http` | WordPress HTTP container port | `8080` | -| `containerPorts.https` | WordPress HTTPS container port | `8443` | -| `extraContainerPorts` | Optionally specify extra list of additional ports for WordPress container(s) | `[]` | -| `podSecurityContext.enabled` | Enabled WordPress pods' Security Context | `true` | -| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `podSecurityContext.fsGroup` | Set WordPress pod's Security Context fsGroup | `1001` | -| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | -| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | -| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | -| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | -| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | -| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | -| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | -| `livenessProbe.enabled` | Enable livenessProbe on WordPress containers | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `readinessProbe.enabled` | Enable readinessProbe on WordPress containers | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `startupProbe.enabled` | Enable startupProbe on WordPress containers | `false` | -| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | -| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | -| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| 
`customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `lifecycleHooks` | for the WordPress container(s) to automate configuration before or after startup | `{}` | +| Name | Description | Value | +| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `replicaCount` | Number of WordPress replicas to deploy | `1` | +| `updateStrategy.type` | WordPress deployment strategy type | `RollingUpdate` | +| `schedulerName` | Alternate scheduler | `""` | +| `terminationGracePeriodSeconds` | In seconds, time given to the WordPress pod to terminate gracefully | `""` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `priorityClassName` | Name of the existing priority class to be used by WordPress pods, priority class needs to be created beforehand | `""` | +| `automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `hostAliases` | WordPress pod host aliases | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes for WordPress pods | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for WordPress container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the WordPress pod | `[]` | +| `initContainers` | Add additional init containers to the WordPress pods | `[]` | +| `podLabels` | Extra labels for WordPress pods | `{}` | +| `podAnnotations` | Annotations for WordPress pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
| `micro` | +| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `containerPorts.http` | WordPress HTTP container port | `8080` | +| `containerPorts.https` | WordPress HTTPS container port | `8443` | +| `extraContainerPorts` | Optionally specify extra list of additional ports for WordPress container(s) | `[]` | +| `podSecurityContext.enabled` | Enabled WordPress pods' Security Context | `true` | +| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `podSecurityContext.fsGroup` | Set WordPress pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `livenessProbe.enabled` | Enable livenessProbe on WordPress containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on WordPress containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on WordPress containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| 
`customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the WordPress container(s) to automate configuration before or after startup | `{}` | ### Traffic Exposure Parameters @@ -428,27 +428,27 @@ As an alternative, use one of the preset configurations for pod affinity, pod an ### Persistence Parameters -| Name | Description | Value | -| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `persistence.enabled` | Enable persistence using Persistent Volume Claims | `true` | -| `persistence.storageClass` | Persistent Volume storage class | `""` | -| `persistence.accessModes` | Persistent Volume access modes | `[]` | -| `persistence.accessMode` | Persistent Volume access mode (DEPRECATED: use `persistence.accessModes` instead) | `ReadWriteOnce` | -| `persistence.size` | Persistent Volume size | `10Gi` | -| `persistence.dataSource` | Custom PVC data source | `{}` | -| `persistence.existingClaim` | The name of an existing PVC to use for persistence | `""` | -| `persistence.selector` | Selector to match an existing Persistent Volume for WordPress data PVC | `{}` | -| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | -| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | -| `volumePermissions.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` | -| `volumePermissions.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` | -| `volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | -| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `nano` | -| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | +| Name | Description | Value | +| ----------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `persistence.enabled` | Enable persistence using Persistent Volume Claims | `true` | +| `persistence.storageClass` | Persistent Volume storage class | `""` | +| `persistence.accessModes` | Persistent Volume access modes | `[]` | +| `persistence.accessMode` | Persistent Volume access mode (DEPRECATED: use `persistence.accessModes` instead) | `ReadWriteOnce` | +| `persistence.size` | Persistent Volume size | `10Gi` | +| `persistence.dataSource` | Custom PVC data source | `{}` | +| `persistence.existingClaim` | The name of an existing PVC to use for persistence | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume for WordPress data PVC | `{}` | +| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `nano` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | ### Other Parameters @@ -469,59 +469,59 @@ As an alternative, use one of the preset configurations for pod affinity, pod an ### Metrics Parameters -| Name | Description | Value | -| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------- | -| `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` | -| `metrics.image.registry` | Apache exporter image registry | `REGISTRY_NAME` | -| `metrics.image.repository` | Apache exporter image repository | `REPOSITORY_NAME/apache-exporter` | -| `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` | -| `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` | -| `metrics.containerPorts.metrics` | Prometheus exporter container port | `9117` | -| `metrics.livenessProbe.enabled` | Enable livenessProbe on Prometheus exporter containers | `true` | -| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | -| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `metrics.readinessProbe.enabled` | Enable readinessProbe on Prometheus exporter containers | `true` | -| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | -| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | -| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `metrics.startupProbe.enabled` | Enable startupProbe on Prometheus exporter containers | `false` | -| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | -| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `metrics.resourcesPreset` | Set container resources according 
to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). | `nano` | -| `metrics.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `metrics.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | -| `metrics.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `metrics.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `metrics.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | -| `metrics.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | -| `metrics.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | -| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | -| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | -| `metrics.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | -| `metrics.service.ports.metrics` | Prometheus metrics service port | `9150` | -| `metrics.service.annotations` | Additional custom annotations for Metrics service | `{}` | -| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | -| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | -| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------- | +| `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` | +| `metrics.image.registry` | Apache exporter image registry | `REGISTRY_NAME` | +| `metrics.image.repository` | Apache exporter image repository | `REPOSITORY_NAME/apache-exporter` | +| `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` | +| `metrics.containerPorts.metrics` | Prometheus exporter container port | `9117` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). 
| `nano` | +| `metrics.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `metrics.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `metrics.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `metrics.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `metrics.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `metrics.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.service.ports.metrics` | Prometheus metrics service port | `9150` | +| `metrics.service.annotations` | Additional custom annotations for Metrics service | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | ### NetworkPolicy parameters @@ -617,6 +617,10 @@ To enable the new features, it is not possible to do it by upgrading an existing ## Upgrading +### To 22.0.0 + +This major release bumps the MariaDB chart version to [18.x.x](https://github.com/bitnami/charts/pull/24804); no major issues are expected during the upgrade. + ### To 21.0.0 This major bump changes the following security defaults: @@ -631,7 +635,7 @@ This could potentially break any customization or init scripts used in your depl ### To 20.0.0 -This major release bumps the and MariaDB chart version to [16.x.x](https://github.com/bitnami/charts/pull/23054); no major issues are expected during the upgrade. +This major release bumps the MariaDB chart version to [16.x.x](https://github.com/bitnami/charts/pull/23054); no major issues are expected during the upgrade. 
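A note on the "To 22.0.0" upgrade entry added above: that major bump only tracks the MariaDB subchart move to 18.x, so applying it is a routine chart upgrade. A minimal sketch, assuming an OCI-based install and a release named `my-release` (both assumptions, not taken from this changeset; `REGISTRY_NAME`/`REPOSITORY_NAME` follow the placeholders used in the parameter tables above):

```
# Illustrative only: move an existing WordPress release onto the 22.x line
# described in the upgrade note above.
helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/wordpress --version 22.0.0
```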
### To 19.0.0 diff --git a/charts/bitnami/wordpress/charts/common/Chart.yaml b/charts/bitnami/wordpress/charts/common/Chart.yaml index f86ccd23a..8d0e54694 100644 --- a/charts/bitnami/wordpress/charts/common/Chart.yaml +++ b/charts/bitnami/wordpress/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.19.0 +appVersion: 2.19.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.19.0 +version: 2.19.1 diff --git a/charts/bitnami/wordpress/charts/common/templates/_resources.tpl b/charts/bitnami/wordpress/charts/common/templates/_resources.tpl index d90f8752d..030fa1a99 100644 --- a/charts/bitnami/wordpress/charts/common/templates/_resources.tpl +++ b/charts/bitnami/wordpress/charts/common/templates/_resources.tpl @@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production {{ include "common.resources.preset" (dict "type" "nano") -}} */}} {{- define "common.resources.preset" -}} -{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") @@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") ) "xlarge" (dict - "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") ) "2xlarge" (dict - "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") ) }} @@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production {{- else -}} {{- printf "ERROR: Preset key '%s' invalid. 
Allowed values are %s" .type (join "," (keys $presets)) | fail -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/bitnami/wordpress/charts/mariadb/Chart.lock b/charts/bitnami/wordpress/charts/mariadb/Chart.lock index 220f0e4b2..0d8862e38 100644 --- a/charts/bitnami/wordpress/charts/mariadb/Chart.lock +++ b/charts/bitnami/wordpress/charts/mariadb/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.19.0 -digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc -generated: "2024-03-08T11:25:32.224991562+01:00" + version: 2.19.1 +digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5 +generated: "2024-04-02T11:21:12.855408532Z" diff --git a/charts/bitnami/wordpress/charts/mariadb/Chart.yaml b/charts/bitnami/wordpress/charts/mariadb/Chart.yaml index 5d5ddf9b5..114174ac9 100644 --- a/charts/bitnami/wordpress/charts/mariadb/Chart.yaml +++ b/charts/bitnami/wordpress/charts/mariadb/Chart.yaml @@ -2,14 +2,14 @@ annotations: category: Database images: | - name: mariadb - image: docker.io/bitnami/mariadb:11.2.3-debian-12-r4 + image: docker.io/bitnami/mariadb:11.3.2-debian-12-r0 - name: mysqld-exporter - image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r8 + image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r10 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r16 + image: docker.io/bitnami/os-shell:12-debian-12-r18 licenses: Apache-2.0 apiVersion: v2 -appVersion: 11.2.3 +appVersion: 11.3.2 dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts @@ -33,4 +33,4 @@ maintainers: name: mariadb sources: - https://github.com/bitnami/charts/tree/main/bitnami/mariadb -version: 17.0.1 +version: 18.0.0 diff --git a/charts/bitnami/wordpress/charts/mariadb/charts/common/Chart.yaml b/charts/bitnami/wordpress/charts/mariadb/charts/common/Chart.yaml index f86ccd23a..8d0e54694 100644 --- a/charts/bitnami/wordpress/charts/mariadb/charts/common/Chart.yaml +++ b/charts/bitnami/wordpress/charts/mariadb/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.19.0 +appVersion: 2.19.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.19.0 +version: 2.19.1 diff --git a/charts/bitnami/wordpress/charts/mariadb/charts/common/templates/_resources.tpl b/charts/bitnami/wordpress/charts/mariadb/charts/common/templates/_resources.tpl index d90f8752d..030fa1a99 100644 --- a/charts/bitnami/wordpress/charts/mariadb/charts/common/templates/_resources.tpl +++ b/charts/bitnami/wordpress/charts/mariadb/charts/common/templates/_resources.tpl @@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production {{ include "common.resources.preset" (dict "type" "nano") -}} */}} {{- define "common.resources.preset" -}} -{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") @@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") ) "xlarge" (dict - "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") ) "2xlarge" (dict - "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") ) }} @@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production {{- else -}} {{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/bitnami/wordpress/charts/mariadb/values.yaml b/charts/bitnami/wordpress/charts/mariadb/values.yaml index 64d1a00e0..207ed8fcb 100644 --- a/charts/bitnami/wordpress/charts/mariadb/values.yaml +++ b/charts/bitnami/wordpress/charts/mariadb/values.yaml @@ -95,7 +95,7 @@ serviceBindings: image: registry: docker.io repository: bitnami/mariadb - tag: 11.2.3-debian-12-r4 + tag: 11.3.2-debian-12-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1053,7 +1053,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r16 + tag: 12-debian-12-r18 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) @@ -1097,7 +1097,7 @@ metrics: image: registry: docker.io repository: bitnami/mysqld-exporter - tag: 0.15.1-debian-12-r8 + tag: 0.15.1-debian-12-r10 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) @@ -1321,8 +1321,8 @@ networkPolicy: ## enabled: true ## @param networkPolicy.allowExternal The Policy model to apply - ## When set to false, only pods with the correct client label will have network access to the ports Keycloak is - ## listening on. When true, Keycloak will accept connections from any source (with the correct destination port). 
+ ## When set to false, only pods with the correct client label will have network access to the ports MariaDB is + ## listening on. When true, MariaDB will accept connections from any source (with the correct destination port). ## allowExternal: true ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. diff --git a/charts/bitnami/wordpress/charts/memcached/Chart.lock b/charts/bitnami/wordpress/charts/memcached/Chart.lock index 447926af8..e35e33e2b 100644 --- a/charts/bitnami/wordpress/charts/memcached/Chart.lock +++ b/charts/bitnami/wordpress/charts/memcached/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.19.0 -digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc -generated: "2024-03-11T17:28:48.470772529+01:00" + version: 2.19.1 +digest: sha256:c883732817d9aaa3304f7b3109262aa338959de15b432dc5a2dbde13d2e136a5 +generated: "2024-03-27T22:17:53.212914918Z" diff --git a/charts/bitnami/wordpress/charts/memcached/Chart.yaml b/charts/bitnami/wordpress/charts/memcached/Chart.yaml index e5ccc747d..5b4f9acd1 100644 --- a/charts/bitnami/wordpress/charts/memcached/Chart.yaml +++ b/charts/bitnami/wordpress/charts/memcached/Chart.yaml @@ -2,14 +2,14 @@ annotations: category: Infrastructure images: | - name: memcached - image: docker.io/bitnami/memcached:1.6.25-debian-12-r0 + image: docker.io/bitnami/memcached:1.6.26-debian-12-r0 - name: memcached-exporter - image: docker.io/bitnami/memcached-exporter:0.14.2-debian-12-r11 + image: docker.io/bitnami/memcached-exporter:0.14.3-debian-12-r0 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r16 + image: docker.io/bitnami/os-shell:12-debian-12-r17 licenses: Apache-2.0 apiVersion: v2 -appVersion: 1.6.25 +appVersion: 1.6.26 dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts @@ -30,4 +30,4 @@ maintainers: name: memcached sources: - https://github.com/bitnami/charts/tree/main/bitnami/memcached -version: 7.0.2 +version: 7.0.3 diff --git a/charts/bitnami/wordpress/charts/memcached/charts/common/Chart.yaml b/charts/bitnami/wordpress/charts/memcached/charts/common/Chart.yaml index f86ccd23a..8d0e54694 100644 --- a/charts/bitnami/wordpress/charts/memcached/charts/common/Chart.yaml +++ b/charts/bitnami/wordpress/charts/memcached/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.19.0 +appVersion: 2.19.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.19.0 +version: 2.19.1 diff --git a/charts/bitnami/wordpress/charts/memcached/charts/common/templates/_resources.tpl b/charts/bitnami/wordpress/charts/memcached/charts/common/templates/_resources.tpl index d90f8752d..030fa1a99 100644 --- a/charts/bitnami/wordpress/charts/memcached/charts/common/templates/_resources.tpl +++ b/charts/bitnami/wordpress/charts/memcached/charts/common/templates/_resources.tpl @@ -11,7 +11,7 @@ These presets are for basic testing and not meant to be used in production {{ include "common.resources.preset" (dict "type" "nano") -}} */}} {{- define "common.resources.preset" -}} -{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}} +{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}} {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") @@ -34,11 +34,11 @@ These presets are for basic testing and not meant to be used in production "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") ) "xlarge" (dict - "requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") ) "2xlarge" (dict - "requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi") + "requests" (dict "cpu" "1.5" "memory" "4096Mi" "ephemeral-storage" "50Mi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") ) }} @@ -47,4 +47,4 @@ These presets are for basic testing and not meant to be used in production {{- else -}} {{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/bitnami/wordpress/charts/memcached/values.yaml b/charts/bitnami/wordpress/charts/memcached/values.yaml index 904dc7728..193ccb792 100644 --- a/charts/bitnami/wordpress/charts/memcached/values.yaml +++ b/charts/bitnami/wordpress/charts/memcached/values.yaml @@ -79,7 +79,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/memcached - tag: 1.6.25-debian-12-r0 + tag: 1.6.26-debian-12-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -476,8 +476,8 @@ networkPolicy: ## enabled: true ## @param networkPolicy.allowExternal The Policy model to apply - ## When set to false, only pods with the correct client label will have network access to the ports Keycloak is - ## listening on. When true, Keycloak will accept connections from any source (with the correct destination port). + ## When set to false, only pods with the correct client label will have network access to the ports Memcached is + ## listening on. When true, Memcached will accept connections from any source (with the correct destination port). ## allowExternal: true ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. @@ -600,7 +600,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r16 + tag: 12-debian-12-r17 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
@@ -655,7 +655,7 @@ metrics: image: registry: docker.io repository: bitnami/memcached-exporter - tag: 0.14.2-debian-12-r11 + tag: 0.14.3-debian-12-r0 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. diff --git a/charts/bitnami/wordpress/templates/deployment.yaml b/charts/bitnami/wordpress/templates/deployment.yaml index ccbd12a8f..136b30c9d 100644 --- a/charts/bitnami/wordpress/templates/deployment.yaml +++ b/charts/bitnami/wordpress/templates/deployment.yaml @@ -125,6 +125,7 @@ spec: #!/bin/bash . /opt/bitnami/scripts/liblog.sh + . /opt/bitnami/scripts/libfs.sh info "Copying base dir to empty dir" # In order to not break the application functionality (such as upgrades or plugins) we need diff --git a/charts/bitnami/wordpress/values.yaml b/charts/bitnami/wordpress/values.yaml index ce6b06159..7bda9ef44 100644 --- a/charts/bitnami/wordpress/values.yaml +++ b/charts/bitnami/wordpress/values.yaml @@ -82,7 +82,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/wordpress - tag: 6.4.3-debian-12-r28 + tag: 6.5.0-debian-12-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -381,7 +381,7 @@ nodeSelector: {} tolerations: [] ## WordPress containers' resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ -## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). +## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "micro" @@ -787,7 +787,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r17 + tag: 12-debian-12-r18 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -800,7 +800,7 @@ volumePermissions: pullSecrets: [] ## Init container's resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "nano" @@ -890,7 +890,7 @@ metrics: image: registry: docker.io repository: bitnami/apache-exporter - tag: 1.0.7-debian-12-r0 + tag: 1.0.7-debian-12-r1 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
@@ -960,7 +960,7 @@ metrics: customStartupProbe: {} ## Prometheus exporter container's resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). + ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## resourcesPreset: "nano" diff --git a/charts/bitnami/zookeeper/Chart.yaml b/charts/bitnami/zookeeper/Chart.yaml index 6bfe8d7da..33d05354c 100644 --- a/charts/bitnami/zookeeper/Chart.yaml +++ b/charts/bitnami/zookeeper/Chart.yaml @@ -30,4 +30,4 @@ maintainers: name: zookeeper sources: - https://github.com/bitnami/charts/tree/main/bitnami/zookeeper -version: 13.0.1 +version: 13.1.0 diff --git a/charts/bitnami/zookeeper/README.md b/charts/bitnami/zookeeper/README.md index 289c5383c..1170c4c45 100644 --- a/charts/bitnami/zookeeper/README.md +++ b/charts/bitnami/zookeeper/README.md @@ -209,6 +209,7 @@ As an alternative, you can use any of the preset configurations for pod affinity | Name | Description | Value | | --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | | `replicaCount` | Number of ZooKeeper nodes | `1` | +| `revisionHistoryLimit` | The number of old history to retain to allow rollback | `10` | | `containerPorts.client` | ZooKeeper client container port | `2181` | | `containerPorts.tls` | ZooKeeper TLS container port | `3181` | | `containerPorts.follower` | ZooKeeper follower container port | `2888` | diff --git a/charts/bitnami/zookeeper/templates/statefulset.yaml b/charts/bitnami/zookeeper/templates/statefulset.yaml index 82b2208de..e0b1b050d 100644 --- a/charts/bitnami/zookeeper/templates/statefulset.yaml +++ b/charts/bitnami/zookeeper/templates/statefulset.yaml @@ -16,6 +16,7 @@ metadata: {{- end }} spec: replicas: {{ .Values.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} podManagementPolicy: {{ .Values.podManagementPolicy }} {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . 
) }} selector: diff --git a/charts/bitnami/zookeeper/values.yaml b/charts/bitnami/zookeeper/values.yaml index 17870cf51..c2763e30d 100644 --- a/charts/bitnami/zookeeper/values.yaml +++ b/charts/bitnami/zookeeper/values.yaml @@ -246,6 +246,9 @@ args: [] ## @param replicaCount Number of ZooKeeper nodes ## replicaCount: 1 +## @param revisionHistoryLimit The number of old history to retain to allow rollback +## +revisionHistoryLimit: 10 ## @param containerPorts.client ZooKeeper client container port ## @param containerPorts.tls ZooKeeper TLS container port ## @param containerPorts.follower ZooKeeper follower container port diff --git a/charts/cockroach-labs/cockroachdb/Chart.yaml b/charts/cockroach-labs/cockroachdb/Chart.yaml index 51f7a6dec..a0c3cd1c4 100644 --- a/charts/cockroach-labs/cockroachdb/Chart.yaml +++ b/charts/cockroach-labs/cockroachdb/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.8-0' catalog.cattle.io/release-name: cockroachdb apiVersion: v1 -appVersion: 23.2.2 +appVersion: 23.2.3 description: CockroachDB is a scalable, survivable, strongly-consistent SQL database. home: https://www.cockroachlabs.com icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png @@ -14,4 +14,4 @@ maintainers: name: cockroachdb sources: - https://github.com/cockroachdb/cockroach -version: 12.0.2 +version: 12.0.3 diff --git a/charts/cockroach-labs/cockroachdb/README.md b/charts/cockroach-labs/cockroachdb/README.md index 07d438772..e90bb56f2 100644 --- a/charts/cockroach-labs/cockroachdb/README.md +++ b/charts/cockroach-labs/cockroachdb/README.md @@ -229,10 +229,10 @@ kubectl get pods \ ``` ``` -my-release-cockroachdb-0 cockroachdb/cockroach:v23.2.2 -my-release-cockroachdb-1 cockroachdb/cockroach:v23.2.2 -my-release-cockroachdb-2 cockroachdb/cockroach:v23.2.2 -my-release-cockroachdb-3 cockroachdb/cockroach:v23.2.2 +my-release-cockroachdb-0 cockroachdb/cockroach:v23.2.3 +my-release-cockroachdb-1 cockroachdb/cockroach:v23.2.3 +my-release-cockroachdb-2 cockroachdb/cockroach:v23.2.3 +my-release-cockroachdb-3 cockroachdb/cockroach:v23.2.3 ``` Resume normal operations. Once you are comfortable that the stability and performance of the cluster is what you'd expect post-upgrade, finalize the upgrade: @@ -316,7 +316,7 @@ For details see the [`values.yaml`](values.yaml) file. 
| `conf.store.size` | CockroachDB storage size | `""` | | `conf.store.attrs` | CockroachDB storage attributes | `""` | | `image.repository` | Container image name | `cockroachdb/cockroach` | -| `image.tag` | Container image tag | `v23.2.2` | +| `image.tag` | Container image tag | `v23.2.3` | | `image.pullPolicy` | Container pull policy | `IfNotPresent` | | `image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` | | `statefulset.replicas` | StatefulSet replicas number | `3` | diff --git a/charts/cockroach-labs/cockroachdb/values.yaml b/charts/cockroach-labs/cockroachdb/values.yaml index 7c757022b..26e8db68b 100644 --- a/charts/cockroach-labs/cockroachdb/values.yaml +++ b/charts/cockroach-labs/cockroachdb/values.yaml @@ -7,7 +7,7 @@ fullnameOverride: "" image: repository: cockroachdb/cockroach - tag: v23.2.2 + tag: v23.2.3 pullPolicy: IfNotPresent credentials: {} # registry: docker.io diff --git a/charts/datadog/datadog-operator/CHANGELOG.md b/charts/datadog/datadog-operator/CHANGELOG.md index eb566f658..53cf3c005 100644 --- a/charts/datadog/datadog-operator/CHANGELOG.md +++ b/charts/datadog/datadog-operator/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 1.6.0 + +* Update Datadog Operator version to 1.5.0. + ## 1.5.2 * Add deprecation warning for `DatadogAgent` `v1alpha1` CRD version. diff --git a/charts/datadog/datadog-operator/Chart.lock b/charts/datadog/datadog-operator/Chart.lock index 6632b4a31..be00d4f6a 100644 --- a/charts/datadog/datadog-operator/Chart.lock +++ b/charts/datadog/datadog-operator/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: datadog-crds repository: https://helm.datadoghq.com - version: 1.4.0 -digest: sha256:051b894b6d03a9a78919a1549b891592cb1aa82e59386c237b93241bdba7054c -generated: "2024-02-15T15:04:10.736131-05:00" + version: 1.5.0 +digest: sha256:0d4930313af68e7bb8e9074b782b5e09fd47201343f25f5bf0c01a3e0d920a47 +generated: "2024-03-28T13:57:10.146369-04:00" diff --git a/charts/datadog/datadog-operator/Chart.yaml b/charts/datadog/datadog-operator/Chart.yaml index 1dcbc7bcb..a39ef053e 100644 --- a/charts/datadog/datadog-operator/Chart.yaml +++ b/charts/datadog/datadog-operator/Chart.yaml @@ -3,7 +3,7 @@ annotations: catalog.cattle.io/display-name: Datadog Operator catalog.cattle.io/release-name: datadog-operator apiVersion: v2 -appVersion: 1.4.0 +appVersion: 1.5.0 dependencies: - alias: datadogCRDs condition: installCRDs @@ -11,7 +11,7 @@ dependencies: repository: file://./charts/datadog-crds tags: - install-crds - version: =1.4.0 + version: =1.5.0 description: Datadog Operator home: https://www.datadoghq.com icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png @@ -26,4 +26,4 @@ name: datadog-operator sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-agent -version: 1.5.2 +version: 1.6.0 diff --git a/charts/datadog/datadog-operator/README.md b/charts/datadog/datadog-operator/README.md index 6343ff3e0..1bca7e62d 100644 --- a/charts/datadog/datadog-operator/README.md +++ b/charts/datadog/datadog-operator/README.md @@ -1,6 +1,6 @@ # Datadog Operator -![Version: 1.5.2](https://img.shields.io/badge/Version-1.5.2-informational?style=flat-square) ![AppVersion: 1.4.0](https://img.shields.io/badge/AppVersion-1.4.0-informational?style=flat-square) +![Version: 1.6.0](https://img.shields.io/badge/Version-1.6.0-informational?style=flat-square) ![AppVersion: 1.5.0](https://img.shields.io/badge/AppVersion-1.5.0-informational?style=flat-square) ## Values @@ -14,6 +14,7 @@ | 
collectOperatorMetrics | bool | `true` | Configures an openmetrics check to collect operator metrics | | containerSecurityContext | object | `{}` | A security context defines privileges and access control settings for a container. | | datadogAgent.enabled | bool | `true` | Enables Datadog Agent controller | +| datadogAgentProfile.enabled | bool | `false` | If true, enables DatadogAgentProfile controller (beta). Requires v1.5.0+ | | datadogCRDs.crds.datadogAgents | bool | `true` | Set to true to deploy the DatadogAgents CRD | | datadogCRDs.crds.datadogMetrics | bool | `true` | Set to true to deploy the DatadogMetrics CRD | | datadogCRDs.crds.datadogMonitors | bool | `true` | Set to true to deploy the DatadogMonitors CRD | @@ -30,7 +31,7 @@ | fullnameOverride | string | `""` | | | image.pullPolicy | string | `"IfNotPresent"` | Define the pullPolicy for Datadog Operator image | | image.repository | string | `"gcr.io/datadoghq/operator"` | Repository to use for Datadog Operator image | -| image.tag | string | `"1.4.0"` | Define the Datadog Operator version to use | +| image.tag | string | `"1.5.0"` | Define the Datadog Operator version to use | | imagePullSecrets | list | `[]` | Datadog Operator repository pullSecret (ex: specify docker registry credentials) | | installCRDs | bool | `true` | Set to true to deploy the Datadog's CRDs | | introspection.enabled | bool | `false` | If true, enables introspection feature (beta). Requires v1.4.0+ | @@ -121,7 +122,7 @@ You can update with the following: ``` helm upgrade \ datadog-operator datadog/datadog-operator \ - --set image.tag=1.4.0 \ + --set image.tag=1.5.0 \ --set datadogCRDs.migration.datadogAgents.version=v2alpha1 \ --set datadogCRDs.migration.datadogAgents.useCertManager=true \ --set datadogCRDs.migration.datadogAgents.conversionWebhook.enabled=true diff --git a/charts/datadog/datadog-operator/README.md.gotmpl b/charts/datadog/datadog-operator/README.md.gotmpl index 1a869be84..e7c017ca9 100644 --- a/charts/datadog/datadog-operator/README.md.gotmpl +++ b/charts/datadog/datadog-operator/README.md.gotmpl @@ -68,7 +68,7 @@ You can update with the following: ``` helm upgrade \ datadog-operator datadog/datadog-operator \ - --set image.tag=1.4.0 \ + --set image.tag=1.5.0 \ --set datadogCRDs.migration.datadogAgents.version=v2alpha1 \ --set datadogCRDs.migration.datadogAgents.useCertManager=true \ --set datadogCRDs.migration.datadogAgents.conversionWebhook.enabled=true diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/CHANGELOG.md b/charts/datadog/datadog-operator/charts/datadog-crds/CHANGELOG.md index 4df1b2ae0..06fc73759 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/CHANGELOG.md +++ b/charts/datadog/datadog-operator/charts/datadog-crds/CHANGELOG.md @@ -1,5 +1,8 @@ # Changelog +## 1.5.0 +* Update CRDs from Datadog Operator v1.5.0 tag. + ## 1.4.0 * Update CRDs from Datadog Operator v1.4.0 tag. 
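For readers wiring up the new DatadogAgentProfile pieces referenced in the tables above (the beta `datadogAgentProfile.enabled` flag, which requires operator v1.5.0+, and the `datadogAgentProfiles` CRD toggle exposed by the `datadog-crds` dependency aliased as `datadogCRDs`), here is a hedged sketch of how the flags might be combined. The nested `datadogCRDs.crds.datadogAgentProfiles` path is an assumption derived from that alias, not something spelled out verbatim in this diff:

```
# Illustrative only: enable the beta DatadogAgentProfile controller that ships
# with operator 1.5.0, together with its CRD.
helm upgrade \
  datadog-operator datadog/datadog-operator \
  --set image.tag=1.5.0 \
  --set datadogAgentProfile.enabled=true \
  --set datadogCRDs.crds.datadogAgentProfiles=true  # path assumed via the datadogCRDs alias
```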
diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/Chart.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/Chart.yaml index 3172ec144..3a5eda3fe 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/Chart.yaml +++ b/charts/datadog/datadog-operator/charts/datadog-crds/Chart.yaml @@ -15,4 +15,4 @@ sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-operator - https://docs.datadoghq.com/agent/cluster_agent/external_metrics -version: 1.4.0 +version: 1.5.0 diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/README.md b/charts/datadog/datadog-operator/charts/datadog-crds/README.md index 8cf2d9663..d139c5c01 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/README.md +++ b/charts/datadog/datadog-operator/charts/datadog-crds/README.md @@ -1,6 +1,6 @@ # Datadog CRDs -![Version: 1.4.0](https://img.shields.io/badge/Version-1.4.0-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) +![Version: 1.5.0](https://img.shields.io/badge/Version-1.5.0-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) This chart was designed to allow other "datadog" charts to share `CustomResourceDefinitions` such as the `DatadogMetric`. @@ -22,6 +22,7 @@ But the recommended Kubernetes versions are `1.16+`. | Key | Type | Default | Description | |-----|------|---------|-------------| +| crds.datadogAgentProfiles | bool | `false` | Set to true to deploy the DatadogAgentProfiles CRD | | crds.datadogAgents | bool | `false` | Set to true to deploy the DatadogAgents CRD | | crds.datadogMetrics | bool | `false` | Set to true to deploy the DatadogMetrics CRD | | crds.datadogMonitors | bool | `false` | Set to true to deploy the DatadogMonitors CRD | diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagentprofiles_v1.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagentprofiles_v1.yaml new file mode 100644 index 000000000..b8d5cb075 --- /dev/null +++ b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagentprofiles_v1.yaml @@ -0,0 +1,116 @@ +{{- if and .Values.crds.datadogAgentProfiles (semverCompare ">1.21-0" .Capabilities.KubeVersion.GitVersion ) }} + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: datadogagentprofiles.datadoghq.com + labels: + helm.sh/chart: '{{ include "datadog-crds.chart" . }}' + app.kubernetes.io/managed-by: '{{ .Release.Service }}' + app.kubernetes.io/name: '{{ include "datadog-crds.name" . }}' + app.kubernetes.io/instance: '{{ .Release.Name }}' +spec: + group: datadoghq.com + names: + kind: DatadogAgentProfile + listKind: DatadogAgentProfileList + plural: datadogagentprofiles + shortNames: + - dap + singular: datadogagentprofile + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: DatadogAgentProfile is the Schema for the datadogagentprofiles API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatadogAgentProfileSpec defines the desired state of DatadogAgentProfile + properties: + config: + properties: + override: + additionalProperties: + properties: + containers: + additionalProperties: + properties: + resources: + description: ResourceRequirements describes the compute resource requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object + type: object + type: object + type: object + profileAffinity: + properties: + profileNodeAffinity: + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: object + status: + description: DatadogAgentProfileStatus defines the observed state of DatadogAgentProfile + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end }} diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagentprofiles_v1beta1.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagentprofiles_v1beta1.yaml new file mode 100644 index 000000000..435cde5bd --- /dev/null +++ b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagentprofiles_v1beta1.yaml @@ -0,0 +1,117 @@ +{{- if and .Values.crds.datadogAgentProfiles (semverCompare "<=1.21-0" .Capabilities.KubeVersion.GitVersion ) }} + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: datadogagentprofiles.datadoghq.com + labels: + helm.sh/chart: '{{ include "datadog-crds.chart" . }}' + app.kubernetes.io/managed-by: '{{ .Release.Service }}' + app.kubernetes.io/name: '{{ include "datadog-crds.name" . }}' + app.kubernetes.io/instance: '{{ .Release.Name }}' +spec: + group: datadoghq.com + names: + kind: DatadogAgentProfile + listKind: DatadogAgentProfileList + plural: datadogagentprofiles + shortNames: + - dap + singular: datadogagentprofile + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: DatadogAgentProfile is the Schema for the datadogagentprofiles API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatadogAgentProfileSpec defines the desired state of DatadogAgentProfile + properties: + config: + properties: + override: + additionalProperties: + properties: + containers: + additionalProperties: + properties: + resources: + description: ResourceRequirements describes the compute resource requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object + type: object + type: object + type: object + profileAffinity: + properties: + profileNodeAffinity: + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: object + status: + description: DatadogAgentProfileStatus defines the observed state of DatadogAgentProfile + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end }} diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1.yaml index a9195ca47..25a240bd9 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1.yaml +++ b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1.yaml @@ -5919,6 +5919,25 @@ spec: format: int32 type: integer type: object + instrumentation: + properties: + disabledNamespaces: + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + type: boolean + enabledNamespaces: + items: + type: string + type: array + x-kubernetes-list-type: set + libVersions: + additionalProperties: + type: string + type: object + type: object unixDomainSocketConfig: properties: enabled: @@ -6128,6 +6147,17 @@ spec: wpaController: type: boolean type: object + helmCheck: + properties: + collectEvents: + type: boolean + enabled: + type: boolean + valuesAsTags: + additionalProperties: + type: string + type: object + type: object kubeStateMetricsCore: properties: conf: @@ -6414,6 +6444,86 @@ spec: url: type: string type: object + fips: + properties: + customFIPSConfig: + properties: + configData: + type: string + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + 
x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + name: + type: string + type: object + type: object + enabled: + type: boolean + image: + properties: + jmxEnabled: + type: boolean + name: + type: string + pullPolicy: + type: string + pullSecrets: + items: + properties: + name: + type: string + type: object + type: array + tag: + type: string + type: object + localAddress: + type: string + port: + format: int32 + type: integer + portRange: + format: int32 + type: integer + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + useHTTPS: + type: boolean + type: object kubelet: properties: agentCAPath: diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1beta1.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1beta1.yaml index a27df90f4..15bf1efd0 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1beta1.yaml +++ b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogagents_v1beta1.yaml @@ -5908,6 +5908,25 @@ spec: format: int32 type: integer type: object + instrumentation: + properties: + disabledNamespaces: + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + type: boolean + enabledNamespaces: + items: + type: string + type: array + x-kubernetes-list-type: set + libVersions: + additionalProperties: + type: string + type: object + type: object unixDomainSocketConfig: properties: enabled: @@ -6117,6 +6136,17 @@ spec: wpaController: type: boolean type: object + helmCheck: + properties: + collectEvents: + type: boolean + enabled: + type: boolean + valuesAsTags: + additionalProperties: + type: string + type: object + type: object kubeStateMetricsCore: properties: conf: @@ -6403,6 +6433,86 @@ spec: url: type: string type: object + fips: + properties: + customFIPSConfig: + properties: + configData: + type: string + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + name: + type: string + type: object + type: object + enabled: + type: boolean + image: + properties: + jmxEnabled: + type: boolean + name: + type: string + pullPolicy: + type: string + pullSecrets: + items: + properties: + name: + type: string + type: object + type: array + tag: + type: string + type: object + localAddress: + type: string + port: + format: int32 + type: integer + portRange: + format: int32 + type: integer + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + useHTTPS: + type: boolean + type: object kubelet: properties: agentCAPath: diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1.yaml index be9c6e3f4..3b7f85b5d 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1.yaml +++ b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1.yaml @@ -107,6 +107,9 @@ spec: notifyNoData: description: A Boolean indicating whether this monitor notifies when data stops reporting. type: boolean + onMissingData: + description: An enum that controls how groups or monitors are treated if an evaluation does not return data points. The default option results in different behavior depending on the monitor query type. For monitors using Count queries, an empty monitor evaluation is treated as 0 and is compared to the threshold conditions. For monitors using any query type other than Count, for example Gauge, Measure, or Rate, the monitor shows the last known status. This option is only available for APM Trace Analytics, Audit Trail, CI, Error Tracking, Event, Logs, and RUM monitors + type: string renotifyInterval: description: The number of minutes after the last notification before a monitor re-notifies on the current status. It only re-notifies if it’s not resolved. format: int64 diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1beta1.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1beta1.yaml index 0f483ae1a..e432c53d9 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1beta1.yaml +++ b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogmonitors_v1beta1.yaml @@ -107,6 +107,9 @@ spec: notifyNoData: description: A Boolean indicating whether this monitor notifies when data stops reporting. type: boolean + onMissingData: + description: An enum that controls how groups or monitors are treated if an evaluation does not return data points. The default option results in different behavior depending on the monitor query type. For monitors using Count queries, an empty monitor evaluation is treated as 0 and is compared to the threshold conditions. For monitors using any query type other than Count, for example Gauge, Measure, or Rate, the monitor shows the last known status. This option is only available for APM Trace Analytics, Audit Trail, CI, Error Tracking, Event, Logs, and RUM monitors + type: string renotifyInterval: description: The number of minutes after the last notification before a monitor re-notifies on the current status. It only re-notifies if it’s not resolved. 
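The hunk above adds the new `onMissingData` option to the DatadogMonitor CRD (the same addition is repeated for the v1beta1 rendering of the template below). As a hedged illustration, the sketch that follows shows a log-alert DatadogMonitor setting the field; it assumes `onMissingData` sits under `spec.options` next to `notifyNoData` and `renotifyInterval`, and the enum values in the comment come from the Datadog monitor API rather than from this diff.

```yaml
# Hedged sketch of a DatadogMonitor using the new onMissingData option.
# The accepted values ("default", "show_no_data", "show_and_notify_no_data",
# "resolve") are Datadog monitor API conventions, not enumerated in this CRD.
apiVersion: datadoghq.com/v1alpha1
kind: DatadogMonitor
metadata:
  name: checkout-error-logs
  namespace: datadog            # hypothetical namespace
spec:
  name: "High volume of checkout error logs"
  message: "Error logs above threshold. Notify @my-team"   # hypothetical handle
  type: "log alert"
  query: 'logs("service:checkout status:error").index("*").rollup("count").last("5m") > 100'
  options:
    notifyNoData: false
    onMissingData: "show_no_data"   # new field introduced by this hunk
    renotifyInterval: 60
```

Per the field description above, this option only applies to monitor types such as Logs, Event, or RUM; other query types keep the default no-data behavior.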
format: int64 diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogslos_v1beta1.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogslos_v1beta1.yaml new file mode 100644 index 000000000..b8c23d948 --- /dev/null +++ b/charts/datadog/datadog-operator/charts/datadog-crds/templates/datadoghq.com_datadogslos_v1beta1.yaml @@ -0,0 +1,206 @@ +{{- if and .Values.crds.datadogSLOs (semverCompare "<=1.21-0" .Capabilities.KubeVersion.GitVersion ) }} + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: datadogslos.datadoghq.com + labels: + helm.sh/chart: '{{ include "datadog-crds.chart" . }}' + app.kubernetes.io/managed-by: '{{ .Release.Service }}' + app.kubernetes.io/name: '{{ include "datadog-crds.name" . }}' + app.kubernetes.io/instance: '{{ .Release.Name }}' +spec: + additionalPrinterColumns: + - JSONPath: .status.id + name: id + type: string + - JSONPath: .status.syncStatus + name: sync status + type: string + - JSONPath: .metadata.creationTimestamp + name: age + type: date + group: datadoghq.com + names: + kind: DatadogSLO + listKind: DatadogSLOList + plural: datadogslos + shortNames: + - ddslo + singular: datadogslo + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: DatadogSLO allows a user to define and manage datadog SLOs from Kubernetes cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + controllerOptions: + description: ControllerOptions are the optional parameters in the DatadogSLO controller + properties: + disableRequiredTags: + description: DisableRequiredTags disables the automatic addition of required tags to SLOs. + type: boolean + type: object + description: + description: Description is a user-defined description of the service level objective. Always included in service level objective responses (but may be null). Optional in create/update requests. + type: string + groups: + description: Groups is a list of (up to 100) monitor groups that narrow the scope of a monitor service level objective. Included in service level objective responses if it is not empty. Optional in create/update requests for monitor service level objectives, but may only be used when the length of the monitor_ids field is one. + items: + type: string + type: array + x-kubernetes-list-type: set + monitorIDs: + description: MonitorIDs is a list of monitor IDs that defines the scope of a monitor service level objective. Required if type is monitor. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: set + name: + description: Name is the name of the service level objective. 
+ type: string + query: + description: Query is the query for a metric-based SLO. Required if type is metric. Note that only the `sum by` aggregator is allowed, which sums all request counts. `Average`, `max`, nor `min` request aggregators are not supported. + properties: + denominator: + description: Denominator is a Datadog metric query for total (valid) events. + type: string + numerator: + description: Numerator is a Datadog metric query for good events. + type: string + required: + - denominator + - numerator + type: object + tags: + description: 'Tags is a list of tags to associate with your service level objective. This can help you categorize and filter service level objectives in the service level objectives page of the UI. Note: it''s not currently possible to filter by these tags when querying via the API.' + items: + type: string + type: array + x-kubernetes-list-type: set + targetThreshold: + anyOf: + - type: integer + - type: string + description: TargetThreshold is the target threshold such that when the service level indicator is above this threshold over the given timeframe, the objective is being met. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + timeframe: + description: The SLO time window options. + type: string + type: + description: Type is the type of the service level objective. + type: string + warningThreshold: + anyOf: + - type: integer + - type: string + description: WarningThreshold is a optional warning threshold such that when the service level indicator is below this value for the given threshold, but above the target threshold, the objective appears in a "warning" state. This value must be greater than the target threshold. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - name + - targetThreshold + - timeframe + - type + type: object + status: + description: DatadogSLOStatus defines the observed state of a DatadogSLO. + properties: + conditions: + description: Conditions represents the latest available observations of the state of a DatadogSLO. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + created: + description: Created is the time the SLO was created. + format: date-time + type: string + creator: + description: Creator is the identity of the SLO creator. + type: string + currentHash: + description: CurrentHash tracks the hash of the current DatadogSLOSpec to know if the Spec has changed and needs an update. + type: string + id: + description: ID is the SLO ID generated in Datadog. + type: string + lastForceSyncTime: + description: LastForceSyncTime is the last time the API SLO was last force synced with the DatadogSLO resource. + format: date-time + type: string + syncStatus: + description: SyncStatus shows the health of syncing the SLO state to Datadog. 
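Because the DatadogSLO CRD above is new in this chart, a worked example may help map the spec schema to an actual object. The sketch below is a metric-based SLO built only from the fields shown in the schema; the metric queries are hypothetical, and the "30d" timeframe follows Datadog's usual 7d/30d/90d convention, which this schema does not enumerate.

```yaml
# Hedged sketch of a metric-based DatadogSLO, using only fields from the schema above.
apiVersion: datadoghq.com/v1alpha1
kind: DatadogSLO
metadata:
  name: checkout-availability
  namespace: datadog                  # hypothetical namespace
spec:
  name: "Checkout availability"
  description: "Ratio of successful checkout requests"
  type: "metric"                      # query is required for metric-type SLOs
  query:
    numerator: "sum:requests.success{service:checkout}.as_count()"     # hypothetical metric
    denominator: "sum:requests.total{service:checkout}.as_count()"     # hypothetical metric
  targetThreshold: "99.9"
  warningThreshold: "99.95"
  timeframe: "30d"                    # assumed 7d/30d/90d convention
  tags:
    - "team:payments"
```

The CRD itself is only rendered when `crds.datadogSLOs` is enabled in the datadog-crds values, as shown in the values.yaml hunk further down.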
+ type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end }} diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/update-crds.sh b/charts/datadog/datadog-operator/charts/datadog-crds/update-crds.sh index c1ff364ce..a0f5be4e9 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/update-crds.sh +++ b/charts/datadog/datadog-operator/charts/datadog-crds/update-crds.sh @@ -59,4 +59,7 @@ download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogagents data download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogagents datadogAgents v1 download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogmonitors datadogMonitors v1beta1 download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogmonitors datadogMonitors v1 +download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogslos datadogSLOs v1beta1 download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogslos datadogSLOs v1 +download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogagentprofiles datadogAgentProfiles v1beta1 +download_crd "$DATADOG_OPERATOR_REPO" "$DATADOG_OPERATOR_TAG" datadogagentprofiles datadogAgentProfiles v1 diff --git a/charts/datadog/datadog-operator/charts/datadog-crds/values.yaml b/charts/datadog/datadog-operator/charts/datadog-crds/values.yaml index 696f33411..3151e3d94 100644 --- a/charts/datadog/datadog-operator/charts/datadog-crds/values.yaml +++ b/charts/datadog/datadog-operator/charts/datadog-crds/values.yaml @@ -11,6 +11,8 @@ crds: datadogMonitors: false # crds.datadogSLOs -- Set to true to deploy the DatadogSLO CRD datadogSLOs: false + # crds.datadogAgentProfiles -- Set to true to deploy the DatadogAgentProfiles CRD + datadogAgentProfiles: false migration: datadogAgents: diff --git a/charts/datadog/datadog-operator/templates/clusterrole.yaml b/charts/datadog/datadog-operator/templates/clusterrole.yaml index 2699c37c7..15bbfb51b 100644 --- a/charts/datadog/datadog-operator/templates/clusterrole.yaml +++ b/charts/datadog/datadog-operator/templates/clusterrole.yaml @@ -696,4 +696,38 @@ rules: verbs: - list - watch +{{- if .Values.enableDatadogAgentProfile }} +- apiGroups: + - "" + resources: + - nodes + verbs: + - patch +- apiGroups: + - datadoghq.com + resources: + - datadogagentprofiles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datadoghq.com + resources: + - datadogagentprofiles/status + verbs: + - get + - patch + - update +- apiGroups: + - datadoghq.com + resources: + - datadogagentprofiles/finalizers + verbs: + - update +{{- end }} {{- end -}} diff --git a/charts/datadog/datadog-operator/templates/deployment.yaml b/charts/datadog/datadog-operator/templates/deployment.yaml index 0c2b45f7f..e8908b26f 100644 --- a/charts/datadog/datadog-operator/templates/deployment.yaml +++ b/charts/datadog/datadog-operator/templates/deployment.yaml @@ -110,6 +110,9 @@ spec: {{- end }} {{- if (semverCompare ">=1.4.0" .Values.image.tag) }} - "-introspectionEnabled={{ .Values.introspection.enabled }}" + {{- end }} + {{- if (semverCompare ">=1.5.0" .Values.image.tag) }} + - "-datadogAgentProfileEnabled={{ .Values.datadogAgentProfile.enabled }}" {{- end }} - "-datadogMonitorEnabled={{ .Values.datadogMonitor.enabled }}" {{- if (semverCompare ">=1.0.0-rc.13" .Values.image.tag) }} diff --git 
a/charts/datadog/datadog-operator/values.yaml b/charts/datadog/datadog-operator/values.yaml index af9863a35..415edfacd 100644 --- a/charts/datadog/datadog-operator/values.yaml +++ b/charts/datadog/datadog-operator/values.yaml @@ -43,7 +43,7 @@ image: # image.repository -- Repository to use for Datadog Operator image repository: gcr.io/datadoghq/operator # image.tag -- Define the Datadog Operator version to use - tag: 1.4.0 + tag: 1.5.0 # image.pullPolicy -- Define the pullPolicy for Datadog Operator image pullPolicy: IfNotPresent # imagePullSecrets -- Datadog Operator repository pullSecret (ex: specify docker registry credentials) @@ -61,6 +61,9 @@ maximumGoroutines: introspection: # introspection.enabled -- If true, enables introspection feature (beta). Requires v1.4.0+ enabled: false +datadogAgentProfile: +# datadogAgentProfile.enabled -- If true, enables DatadogAgentProfile controller (beta). Requires v1.5.0+ + enabled: false # supportExtendedDaemonset -- If true, supports using ExtendedDaemonSet CRD supportExtendedDaemonset: "false" # operatorMetricsEnabled -- Enable forwarding of Datadog Operator metrics and events to Datadog. diff --git a/charts/datadog/datadog/CHANGELOG.md b/charts/datadog/datadog/CHANGELOG.md index b0010380d..03a2449cf 100644 --- a/charts/datadog/datadog/CHANGELOG.md +++ b/charts/datadog/datadog/CHANGELOG.md @@ -1,5 +1,12 @@ # Datadog changelog +## 3.59.4 + +* Add language detection enable option for `APM` instrumentation. + +## 3.59.3 +* Add `contimage-intake.datadoghq.com` & `contlcycle-intake.datadoghq.com` endpoints to the `Agent` cilium network policy. + ## 3.59.2 * Disable language detection reporting by default in Cluster Agent with Agent 7.52+. diff --git a/charts/datadog/datadog/Chart.yaml b/charts/datadog/datadog/Chart.yaml index 8b3133ae8..57229921e 100644 --- a/charts/datadog/datadog/Chart.yaml +++ b/charts/datadog/datadog/Chart.yaml @@ -19,4 +19,4 @@ name: datadog sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-agent -version: 3.59.2 +version: 3.59.4 diff --git a/charts/datadog/datadog/README.md b/charts/datadog/datadog/README.md index a2f793cea..9089d89dd 100644 --- a/charts/datadog/datadog/README.md +++ b/charts/datadog/datadog/README.md @@ -1,6 +1,6 @@ # Datadog -![Version: 3.59.2](https://img.shields.io/badge/Version-3.59.2-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) +![Version: 3.59.4](https://img.shields.io/badge/Version-3.59.4-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) [Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). @@ -665,6 +665,7 @@ helm install \ | datadog.apm.instrumentation.disabledNamespaces | list | `[]` | Disable injecting the Datadog APM libraries into pods in specific namespaces (beta). | | datadog.apm.instrumentation.enabled | bool | `false` | Enable injecting the Datadog APM libraries into all pods in the cluster (beta). 
| | datadog.apm.instrumentation.enabledNamespaces | list | `[]` | Enable injecting the Datadog APM libraries into pods in specific namespaces (beta). | +| datadog.apm.instrumentation.language_detection.enabled | bool | `true` | Run language detection to automatically detect languages of user workloads (beta). | | datadog.apm.instrumentation.libVersions | object | `{}` | Inject specific version of tracing libraries with Single Step Instrumentation (beta). | | datadog.apm.port | int | `8126` | Override the trace Agent port | | datadog.apm.portEnabled | bool | `false` | Enable APM over TCP communication (hostPort 8126 by default) | diff --git a/charts/datadog/datadog/templates/_components-common-env.yaml b/charts/datadog/datadog/templates/_components-common-env.yaml index 3c67bd01c..6d6394895 100644 --- a/charts/datadog/datadog/templates/_components-common-env.yaml +++ b/charts/datadog/datadog/templates/_components-common-env.yaml @@ -1,9 +1,10 @@ # The purpose of this template is to define a minimal set of environment # variables shared between components: agent, cluster-agent {{- define "components-common-env" -}} -# Workaround for issue in `7.52.0` default activating language detection +- name: DD_LANGUAGE_DETECTION_ENABLED + value: {{ include "language-detection-enabled" . | quote }} - name: DD_LANGUAGE_DETECTION_REPORTING_ENABLED - value: "false" + value: {{ include "language-detection-enabled" . | quote }} {{- if .Values.datadog.secretBackend.command }} - name: DD_SECRET_BACKEND_COMMAND value: {{ .Values.datadog.secretBackend.command | quote }} diff --git a/charts/datadog/datadog/templates/_container-process-agent.yaml b/charts/datadog/datadog/templates/_container-process-agent.yaml index b69179bbc..1fd312c5c 100644 --- a/charts/datadog/datadog/templates/_container-process-agent.yaml +++ b/charts/datadog/datadog/templates/_container-process-agent.yaml @@ -52,6 +52,8 @@ {{- end }} - name: DD_ORCHESTRATOR_EXPLORER_ENABLED value: {{ (include "should-enable-k8s-resource-monitoring" .) | quote }} + - name: DD_PROCESS_AGENT_PROCESS_COLLECTION_ENABLED + value: {{ include "language-detection-enabled" . 
| quote }} {{- include "additional-env-entries" .Values.agents.containers.processAgent.env | indent 4 }} {{- include "additional-env-dict-entries" .Values.agents.containers.processAgent.envDict | indent 4 }} volumeMounts: diff --git a/charts/datadog/datadog/templates/_helpers.tpl b/charts/datadog/datadog/templates/_helpers.tpl index 86fc75c98..78d6298f9 100644 --- a/charts/datadog/datadog/templates/_helpers.tpl +++ b/charts/datadog/datadog/templates/_helpers.tpl @@ -903,4 +903,15 @@ Create RBACs for custom resources {{- end -}} {{- end -}} +{{/* + Return true if language detection feature is enabled +*/}} +{{- define "language-detection-enabled" -}} + {{- if and .Values.datadog.apm.instrumentation.enabled .Values.datadog.apm.instrumentation.language_detection.enabled -}} + true + {{- else -}} + false + {{- end -}} +{{- end -}} + diff --git a/charts/datadog/datadog/templates/agent-cilium-network-policy.yaml b/charts/datadog/datadog/templates/agent-cilium-network-policy.yaml index 7e7a4c09a..480ac0a7e 100644 --- a/charts/datadog/datadog/templates/agent-cilium-network-policy.yaml +++ b/charts/datadog/datadog/templates/agent-cilium-network-policy.yaml @@ -89,6 +89,8 @@ specs: - matchName: "api.{{ $.Values.datadog.site }}" - matchName: "agent-intake.logs.{{ $.Values.datadog.site }}" - matchName: "agent-http-intake.logs.{{ $.Values.datadog.site }}" + - matchName: "contimage-intake.{{ $.Values.datadog.site }}" + - matchName: "contlcycle-intake.{{ $.Values.datadog.site }}" - matchName: "process.{{ $.Values.datadog.site }}" - matchName: "orchestrator.{{ $.Values.datadog.site }}" - matchName: "instrumentation-telemetry-intake.{{ $.Values.datadog.site }}" @@ -105,6 +107,8 @@ specs: - matchName: "api.datadoghq.com" - matchName: "agent-intake.logs.datadoghq.com" - matchName: "agent-http-intake.logs.datadoghq.com" + - matchName: "contimage-intake.datadoghq.com" + - matchName: "contlcycle-intake.datadoghq.com" - matchName: "process.datadoghq.com" - matchName: "orchestrator.datadoghq.com" - matchName: "instrumentation-telemetry-intake.datadoghq.com" diff --git a/charts/datadog/datadog/templates/cluster-agent-deployment.yaml b/charts/datadog/datadog/templates/cluster-agent-deployment.yaml index cd7bd026f..e40075932 100644 --- a/charts/datadog/datadog/templates/cluster-agent-deployment.yaml +++ b/charts/datadog/datadog/templates/cluster-agent-deployment.yaml @@ -308,6 +308,8 @@ spec: - name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED value: {{ .Values.datadog.orchestratorExplorer.container_scrubbing.enabled | quote }} {{- end }} + - name: DD_CLUSTER_AGENT_LANGUAGE_DETECTION_PATCHER_ENABLED + value: {{ include "language-detection-enabled" . | quote }} {{- if eq (include "should-enable-security-agent" .) "true" }} - name: DD_COMPLIANCE_CONFIG_ENABLED value: {{ .Values.datadog.securityAgent.compliance.enabled | quote }} diff --git a/charts/datadog/datadog/templates/cluster-agent-rbac.yaml b/charts/datadog/datadog/templates/cluster-agent-rbac.yaml index a414f640f..d9e87ff5b 100644 --- a/charts/datadog/datadog/templates/cluster-agent-rbac.yaml +++ b/charts/datadog/datadog/templates/cluster-agent-rbac.yaml @@ -115,6 +115,17 @@ rules: - "get" - "watch" {{- end }} +{{- if and .Values.datadog.apm.instrumentation.enabled .Values.datadog.apm.instrumentation.language_detection.enabled }} +- apiGroups: + - "apps" + resources: + - deployments + verbs: + - list + - get + - watch + - patch +{{- end }} {{- if eq (include "should-enable-k8s-resource-monitoring" .) 
"true" }} - apiGroups: # to get the kube-system namespace UID and generate a cluster ID - "" diff --git a/charts/datadog/datadog/values.yaml b/charts/datadog/datadog/values.yaml index f39b58904..9fad54d16 100644 --- a/charts/datadog/datadog/values.yaml +++ b/charts/datadog/datadog/values.yaml @@ -479,6 +479,13 @@ datadog: # datadog.apm.instrumentation.libVersions -- Inject specific version of tracing libraries with Single Step Instrumentation (beta). libVersions: {} + + # Language detection currently only detects languages and adds them as annotations on deployments, but doesn't use these languages for injecting libraries to applicative pods. + # It requires Agent 7.52+ and Cluster Agent 7.52+ + language_detection: + # datadog.apm.instrumentation.language_detection.enabled -- Run language detection to automatically detect languages of user workloads (beta). + enabled: true + ## OTLP ingest related configuration otlp: receiver: diff --git a/charts/dynatrace/dynatrace-operator/Chart.yaml b/charts/dynatrace/dynatrace-operator/Chart.yaml index 5f4355578..a020016a5 100644 --- a/charts/dynatrace/dynatrace-operator/Chart.yaml +++ b/charts/dynatrace/dynatrace-operator/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.19.0-0' catalog.cattle.io/release-name: dynatrace-operator apiVersion: v2 -appVersion: 0.15.0 +appVersion: 1.0.0 description: The Dynatrace Operator Helm chart for Kubernetes and OpenShift home: https://www.dynatrace.com/ icon: https://assets.dynatrace.com/global/resources/Signet_Logo_RGB_CP_512x512px.png @@ -20,4 +20,4 @@ name: dynatrace-operator sources: - https://github.com/Dynatrace/dynatrace-operator type: application -version: 0.15.0 +version: 1.0.0 diff --git a/charts/dynatrace/dynatrace-operator/README.md b/charts/dynatrace/dynatrace-operator/README.md index a9f653acf..97a98a018 100644 --- a/charts/dynatrace/dynatrace-operator/README.md +++ b/charts/dynatrace/dynatrace-operator/README.md @@ -15,6 +15,8 @@ Install the Dynatrace Operator via Helm by running the following commands. > For instructions on how to install the dynatrace-operator on Openshift, head to the > [official help page](https://www.dynatrace.com/support/help/shortlink/k8s-helm) +#### For versions older than 0.15.0 + Add `dynatrace` helm repository: ```console @@ -27,6 +29,14 @@ Install `dynatrace-operator` helm chart and create the corresponding `dynatrace` helm install dynatrace-operator dynatrace/dynatrace-operator -n dynatrace --create-namespace --atomic ``` +#### For versions 0.15.0 and after + +Install `dynatrace-operator` helm chart using the OCI repository and create the corresponding `dynatrace` namespace: + +```console +helm install dynatrace-operator oci://public.ecr.aws/dynatrace/dynatrace-operator -n dynatrace --create-namespace --atomic +``` + ## Uninstall chart > Full instructions can be found in the [official help page](https://www.dynatrace.com/support/help/shortlink/k8s-helm#uninstall-dynatrace-operator) diff --git a/charts/dynatrace/dynatrace-operator/questions.yml b/charts/dynatrace/dynatrace-operator/questions.yml index 0792ef84c..a2291a19e 100644 --- a/charts/dynatrace/dynatrace-operator/questions.yml +++ b/charts/dynatrace/dynatrace-operator/questions.yml @@ -13,7 +13,7 @@ questions: - variable: image label: "Set a custom image for operator components" - description: "Set a custom image for operator. Defaults to docker.io/dynatrace/dynatrace-operator" + description: "Set a custom image for operator. 
Defaults to public.ecr.aws/dynatrace/dynatrace-operator" default: "" type: string group: "Global Configuration" diff --git a/charts/dynatrace/dynatrace-operator/templates/Common/crd/dynatrace-operator-crd.yaml b/charts/dynatrace/dynatrace-operator/templates/Common/crd/dynatrace-operator-crd.yaml index 01d408cba..4bfd16bca 100644 --- a/charts/dynatrace/dynatrace-operator/templates/Common/crd/dynatrace-operator-crd.yaml +++ b/charts/dynatrace/dynatrace-operator/templates/Common/crd/dynatrace-operator-crd.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: dynakubes.dynatrace.com spec: conversion: @@ -44,14 +44,19 @@ spec: description: DynaKube is the Schema for the DynaKube API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -66,9 +71,9 @@ spec: a new version is available type: boolean image: - description: 'Optional: the ActiveGate container image. Defaults - to the latest ActiveGate image provided by the Docker Registry - implementation from the Dynatrace environment set as API URL.' + description: |- + Optional: the ActiveGate container image. Defaults to the latest ActiveGate image provided by the Docker Registry + implementation from the Dynatrace environment set as API URL. type: string type: object apiUrl: @@ -101,15 +106,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
- Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -122,9 +128,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its @@ -135,11 +142,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -154,10 +159,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -187,9 +191,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key @@ -216,29 +221,33 @@ spec: description: Node selector to control the selection of nodes (optional) type: object priorityClassName: - description: 'Optional: If specified, indicates the pod''s priority. - Name must be defined by creating a PriorityClass object with - that name. If not specified the setting will be removed from - the DaemonSet.' + description: |- + Optional: If specified, indicates the pod's priority. Name must be defined by creating a PriorityClass object with that + name. If not specified the setting will be removed from the DaemonSet. type: string resources: description: 'Optional: define resources requests and limits for single pods' properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. 
It can only be - set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -254,8 +263,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -264,11 +274,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object serviceAccountName: @@ -278,41 +288,39 @@ spec: tolerations: description: 'Optional: set tolerations for the OneAgent pods' items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. 
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -338,7 +346,7 @@ spec: to allow access to the Dynatrace environment type: boolean kubernetesMonitoring: - description: Configuration for Kubernetes Monitoring + description: ' Configuration for Kubernetes Monitoring' properties: args: description: 'Optional: Adds additional arguments for the ActiveGate @@ -347,9 +355,9 @@ spec: type: string type: array customProperties: - description: 'Optional: Add a custom properties file by providing - it as a value or reference it from a secret If referenced from - a secret, make sure the key is called ''customProperties''' + description: |- + Optional: Add a custom properties file by providing it as a value or reference it from a secret + If referenced from a secret, make sure the key is called 'customProperties' properties: value: type: string @@ -371,15 +379,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' 
+ Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -392,9 +401,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its @@ -405,11 +415,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -424,10 +432,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -457,9 +464,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key @@ -498,19 +506,24 @@ spec: single ActiveGate pods' properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
It makes that resource available + inside a container. type: string required: - name @@ -526,8 +539,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -536,11 +550,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object serviceAccountName: @@ -551,41 +565,39 @@ spec: description: 'Optional: set tolerations for the ActiveGatePods pods' items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. 
- By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -602,13 +614,15 @@ spec: a new version is available type: boolean image: - description: 'Optional: the Dynatrace installer container image - Defaults to docker.io/dynatrace/oneagent:latest for Kubernetes - and to registry.connect.redhat.com/dynatrace/oneagent for OpenShift' + description: |- + Optional: the Dynatrace installer container image + Defaults to docker.io/dynatrace/oneagent:latest for Kubernetes and to registry.connect.redhat.com/dynatrace/oneagent for OpenShift type: string version: - description: 'Optional: If specified, indicates the OneAgent version - to use Defaults to latest Example: {major.minor.release} - 1.200.0' + description: |- + Optional: If specified, indicates the OneAgent version to use + Defaults to latest + Example: {major.minor.release} - 1.200.0 type: string type: object proxy: @@ -621,7 +635,7 @@ spec: type: string type: object routing: - description: Configuration for Routing + description: ' Configuration for Routing' properties: args: description: 'Optional: Adds additional arguments for the ActiveGate @@ -630,9 +644,9 @@ spec: type: string type: array customProperties: - description: 'Optional: Add a custom properties file by providing - it as a value or reference it from a secret If referenced from - a secret, make sure the key is called ''customProperties''' + description: |- + Optional: Add a custom properties file by providing it as a value or reference it from a secret + If referenced from a secret, make sure the key is called 'customProperties' properties: value: type: string @@ -654,15 +668,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". 
type: string valueFrom: description: Source for the environment variable's value. @@ -675,9 +690,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its @@ -688,11 +704,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -707,10 +721,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -740,9 +753,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key @@ -781,19 +795,24 @@ spec: single ActiveGate pods' properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
type: string required: - name @@ -809,8 +828,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -819,11 +839,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object serviceAccountName: @@ -834,41 +854,39 @@ spec: description: 'Optional: set tolerations for the ActiveGatePods pods' items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. 
- By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -881,9 +899,10 @@ spec: description: Credentials for the DynaKube to connect back to Dynatrace. type: string trustedCAs: - description: 'Optional: Adds custom RootCAs from a configmap This - property only affects certificates used to communicate with the - Dynatrace API. The property is not applied to the ActiveGate' + description: |- + Optional: Adds custom RootCAs from a configmap + This property only affects certificates used to communicate with the Dynatrace API. + The property is not applied to the ActiveGate type: string required: - apiUrl @@ -911,42 +930,42 @@ spec: the instance items: description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. 
+ description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -960,11 +979,12 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1067,14 +1087,19 @@ spec: description: DynaKube is the Schema for the DynaKube API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -1096,9 +1121,9 @@ spec: type: string type: array customProperties: - description: Add a custom properties file by providing it as a - value or reference it from a secret If referenced from a secret, - make sure the key is called 'customProperties' + description: |- + Add a custom properties file by providing it as a value or reference it from a secret + If referenced from a secret, make sure the key is called 'customProperties' properties: value: description: Custom properties value. @@ -1123,15 +1148,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -1144,9 +1170,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its @@ -1157,11 +1184,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
properties: apiVersion: description: Version of the schema the FieldPath @@ -1176,10 +1201,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -1209,9 +1233,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key @@ -1244,10 +1269,9 @@ spec: description: Node selector to control the selection of nodes type: object priorityClassName: - description: If specified, indicates the pod's priority. Name - must be defined by creating a PriorityClass object with that - name. If not specified the setting will be removed from the - StatefulSet. + description: |- + If specified, indicates the pod's priority. Name must be defined by creating a PriorityClass object with that + name. If not specified the setting will be removed from the StatefulSet. type: string replicas: description: Amount of replicas for your ActiveGates @@ -1258,19 +1282,24 @@ spec: pods properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -1286,8 +1315,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1296,57 +1326,55 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tlsSecretName: - description: 'The name of a secret containing ActiveGate TLS cert+key - and password. If not set, self-signed certificate is used. server.p12: - certificate+key pair in pkcs12 format password: passphrase to - read server.p12' + description: |- + The name of a secret containing ActiveGate TLS cert+key and password. If not set, self-signed certificate is used. + server.p12: certificate+key pair in pkcs12 format + password: passphrase to read server.p12 type: string tolerations: description: Set tolerations for the ActiveGate pods items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). 
Zero and negative values will - be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -1358,16 +1386,17 @@ spec: matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -1375,17 +1404,16 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -1397,132 +1425,134 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys - to select the pods over which spreading will be calculated. 
- The keys are used to lookup values from the incoming pod - labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading - will be calculated for the incoming pod. The same key - is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't - set. Keys that don't exist in the incoming pod labels - will be ignored. A null or empty list means only match - against labelSelector. \n This is a beta field and requires - the MatchLabelKeysInPodTopologySpread feature gate to - be enabled (enabled by default)." + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). items: type: string type: array x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which pods - may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the number - of matching pods in the target topology and the global - minimum. The global minimum is the minimum number of matching - pods in an eligible domain or zero if the number of eligible - domains is less than MinDomains. For example, in a 3-zone - cluster, MaxSkew is set to 1, and pods with the same labelSelector - spread as 2/2/1: In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to - zone3 to become 2/2/2; scheduling it onto zone1(zone2) - would make the ActualSkew(3-1) on zone1(zone2) violate - MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled - onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies that - satisfy it. It''s a required field. Default value is 1 - and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
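As a rough sketch of how the maxSkew, topologyKey, whenUnsatisfiable and labelSelector fields described in this hunk combine, the following entry is illustration only; the values and the app label are hypothetical, not taken from the chart.

# Illustrative only: one hypothetical topologySpreadConstraints entry.
topologySpreadConstraints:
  - maxSkew: 1                               # at most 1 pod difference between zones
    topologyKey: topology.kubernetes.io/zone # each zone is one topology domain
    whenUnsatisfiable: DoNotSchedule         # refuse to schedule rather than increase skew
    labelSelector:
      matchLabels:
        app: activegate                      # hypothetical label; only matching pods are counted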
format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number of eligible - domains. When the number of eligible domains with matching - topology keys is less than minDomains, Pod Topology Spread - treats \"global minimum\" as 0, and then the calculation - of Skew is performed. And when the number of eligible - domains with matching topology keys equals or greater - than minDomains, this value has no effect on scheduling. - As a result, when the number of eligible domains is less - than minDomains, scheduler won't schedule more than maxSkew - Pods to those domains. If value is nil, the constraint - behaves as if MinDomains is equal to 1. Valid values are - integers greater than 0. When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone cluster, - MaxSkew is set to 2, MinDomains is set to 5 and pods with - the same labelSelector spread as 2/2/2: | zone1 | zone2 - | zone3 | | P P | P P | P P | The number of domains - is less than 5(MinDomains), so \"global minimum\" is treated - as 0. In this situation, new pod with the same labelSelector - cannot be scheduled, because computed skew will be 3(3 - - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. \n This is a beta field and requires - the MinDomainsInPodTopologySpread feature gate to be enabled - (enabled by default)." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat - Pod's nodeAffinity/nodeSelector when calculating pod topology - spread skew. Options are: - Honor: only nodes matching - nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes - are included in the calculations. \n If this value is - nil, the behavior is equivalent to the Honor policy. This - is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. 
Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat - node taints when calculating pod topology spread skew. - Options are: - Honor: nodes without taints, along with - tainted nodes for which the incoming pod has a toleration, - are included. - Ignore: node taints are ignored. All nodes - are included. \n If this value is nil, the behavior is - equivalent to the Ignore policy. This is a beta-level - feature default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: - description: TopologyKey is the key of node labels. Nodes - that have a label with this key and identical values are - considered to be in the same topology. We consider each - as a "bucket", and try to put balanced number - of pods into each bucket. We define a domain as a particular - instance of a topology. Also, we define an eligible domain - as a domain whose nodes meet the requirements of nodeAffinityPolicy - and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain - of that topology. It's a required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with - a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule - it. - ScheduleAnyway tells the scheduler to schedule the - pod in any location, but giving higher precedence to topologies - that would help reduce the skew. A constraint is considered - "Unsatisfiable" for an incoming pod if and only if every - possible node assignment for that pod would violate "MaxSkew" - on some topology. 
For example, in a 3-zone cluster, MaxSkew - is set to 1, and pods with the same labelSelector spread - as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming - pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) - as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). - In other words, the cluster can still be imbalanced, but - scheduler won''t make it *more* imbalanced. It''s a required - field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -1532,31 +1562,29 @@ spec: type: array type: object apiUrl: - description: Dynatrace apiUrl, including the /api path at the end. - For SaaS, set YOUR_ENVIRONMENT_ID to your environment ID. For Managed, - change the apiUrl address. For instructions on how to determine - the environment ID and how to configure the apiUrl address, see - Environment ID (https://www.dynatrace.com/support/help/get-started/monitoring-environment/environment-id). + description: |- + Dynatrace apiUrl, including the /api path at the end. For SaaS, set YOUR_ENVIRONMENT_ID to your environment ID. For Managed, change the apiUrl address. + For instructions on how to determine the environment ID and how to configure the apiUrl address, see Environment ID (https://www.dynatrace.com/support/help/get-started/monitoring-environment/environment-id). type: string customPullSecret: - description: Defines a custom pull secret in case you use a private - registry when pulling images from the Dynatrace environment. To - define a custom pull secret and learn about the expected behavior, - see Configure customPullSecret (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring/dto-config-options-k8s#custompullsecret). + description: |- + Defines a custom pull secret in case you use a private registry when pulling images from the Dynatrace environment. + To define a custom pull secret and learn about the expected behavior, see Configure customPullSecret + (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring/dto-config-options-k8s#custompullsecret). type: string enableIstio: - description: When enabled, and if Istio is installed on the Kubernetes - environment, Dynatrace Operator will create the corresponding VirtualService - and ServiceEntry objects to allow access to the Dynatrace Cluster - from the OneAgent or ActiveGate. Disabled by default. 
+ description: |- + When enabled, and if Istio is installed on the Kubernetes environment, Dynatrace Operator will create the corresponding + VirtualService and ServiceEntry objects to allow access to the Dynatrace Cluster from the OneAgent or ActiveGate. + Disabled by default. type: boolean kubernetesMonitoring: description: Configuration for Kubernetes Monitoring properties: customProperties: - description: Add a custom properties file by providing it as a - value or reference it from a secret If referenced from a secret, - make sure the key is called 'customProperties' + description: |- + Add a custom properties file by providing it as a value or reference it from a secret + If referenced from a secret, make sure the key is called 'customProperties' properties: value: description: Custom properties value. @@ -1581,15 +1609,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -1602,9 +1631,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its @@ -1615,11 +1645,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -1634,10 +1662,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' 
+ description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -1667,9 +1694,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key @@ -1710,19 +1738,24 @@ spec: pods properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -1738,8 +1771,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1748,51 +1782,49 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: description: Set tolerations for the ActiveGate pods items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -1804,16 +1836,17 @@ spec: matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -1821,17 +1854,16 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -1843,132 +1875,134 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys - to select the pods over which spreading will be calculated. - The keys are used to lookup values from the incoming pod - labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading - will be calculated for the incoming pod. The same key - is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't - set. Keys that don't exist in the incoming pod labels - will be ignored. A null or empty list means only match - against labelSelector. \n This is a beta field and requires - the MatchLabelKeysInPodTopologySpread feature gate to - be enabled (enabled by default)." + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). items: type: string type: array x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which pods - may be unevenly distributed. 
When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the number - of matching pods in the target topology and the global - minimum. The global minimum is the minimum number of matching - pods in an eligible domain or zero if the number of eligible - domains is less than MinDomains. For example, in a 3-zone - cluster, MaxSkew is set to 1, and pods with the same labelSelector - spread as 2/2/1: In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to - zone3 to become 2/2/2; scheduling it onto zone1(zone2) - would make the ActualSkew(3-1) on zone1(zone2) violate - MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled - onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies that - satisfy it. It''s a required field. Default value is 1 - and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number of eligible - domains. When the number of eligible domains with matching - topology keys is less than minDomains, Pod Topology Spread - treats \"global minimum\" as 0, and then the calculation - of Skew is performed. And when the number of eligible - domains with matching topology keys equals or greater - than minDomains, this value has no effect on scheduling. - As a result, when the number of eligible domains is less - than minDomains, scheduler won't schedule more than maxSkew - Pods to those domains. If value is nil, the constraint - behaves as if MinDomains is equal to 1. Valid values are - integers greater than 0. When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone cluster, - MaxSkew is set to 2, MinDomains is set to 5 and pods with - the same labelSelector spread as 2/2/2: | zone1 | zone2 - | zone3 | | P P | P P | P P | The number of domains - is less than 5(MinDomains), so \"global minimum\" is treated - as 0. In this situation, new pod with the same labelSelector - cannot be scheduled, because computed skew will be 3(3 - - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. \n This is a beta field and requires - the MinDomainsInPodTopologySpread feature gate to be enabled - (enabled by default)." + description: |- + MinDomains indicates a minimum number of eligible domains. 
+ When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat - Pod's nodeAffinity/nodeSelector when calculating pod topology - spread skew. Options are: - Honor: only nodes matching - nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes - are included in the calculations. \n If this value is - nil, the behavior is equivalent to the Honor policy. This - is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat - node taints when calculating pod topology spread skew. - Options are: - Honor: nodes without taints, along with - tainted nodes for which the incoming pod has a toleration, - are included. - Ignore: node taints are ignored. All nodes - are included. \n If this value is nil, the behavior is - equivalent to the Ignore policy. This is a beta-level - feature default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: - description: TopologyKey is the key of node labels. 
Nodes - that have a label with this key and identical values are - considered to be in the same topology. We consider each - as a "bucket", and try to put balanced number - of pods into each bucket. We define a domain as a particular - instance of a topology. Also, we define an eligible domain - as a domain whose nodes meet the requirements of nodeAffinityPolicy - and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain - of that topology. It's a required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with - a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule - it. - ScheduleAnyway tells the scheduler to schedule the - pod in any location, but giving higher precedence to topologies - that would help reduce the skew. A constraint is considered - "Unsatisfiable" for an incoming pod if and only if every - possible node assignment for that pod would violate "MaxSkew" - on some topology. For example, in a 3-zone cluster, MaxSkew - is set to 1, and pods with the same labelSelector spread - as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming - pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) - as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). - In other words, the cluster can still be imbalanced, but - scheduler won''t make it *more* imbalanced. It''s a required - field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -1978,33 +2012,32 @@ spec: type: array type: object namespaceSelector: - description: Applicable only for applicationMonitoring or cloudNativeFullStack - configuration types. The namespaces where you want Dynatrace Operator - to inject. 
For more information, see Configure monitoring for namespaces - and pods (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring/dto-config-options-k8s#annotate). + description: |- + Applicable only for applicationMonitoring or cloudNativeFullStack configuration types. The namespaces where you want Dynatrace Operator to inject. + For more information, see Configure monitoring for namespaces and pods (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring/dto-config-options-k8s#annotate). properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -2017,11 +2050,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -2029,14 +2061,14 @@ spec: description: Sets a network zone for the OneAgent and ActiveGate pods. type: string oneAgent: - description: General configuration about OneAgent instances. You can't - enable more than one module (classicFullStack, cloudNativeFullStack, - hostMonitoring, or applicationMonitoring). + description: |- + General configuration about OneAgent instances. + You can't enable more than one module (classicFullStack, cloudNativeFullStack, hostMonitoring, or applicationMonitoring). properties: applicationMonitoring: - description: dynatrace-webhook injects into application pods based - on labeled namespaces. Has an optional CSI driver per node via - DaemonSet to provide binaries to pods. + description: |- + dynatrace-webhook injects into application pods based on labeled namespaces. 
+ Has an optional CSI driver per node via DaemonSet to provide binaries to pods. nullable: true properties: codeModulesImage: @@ -2044,23 +2076,28 @@ spec: Pods. type: string initResources: - description: Define resources requests and limits for the - initContainer. For details, see Managing resources for containers + description: |- + Define resources requests and limits for the initContainer. For details, see Managing resources for containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers). properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available inside a container. type: string required: @@ -2077,8 +2114,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2087,11 +2125,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object useCSIDriver: @@ -2104,8 +2142,9 @@ spec: type: string type: object classicFullStack: - description: Has a single OneAgent per node via DaemonSet. Injection - is performed via the same OneAgent DaemonSet. + description: |- + Has a single OneAgent per node via DaemonSet. + Injection is performed via the same OneAgent DaemonSet. nullable: true properties: annotations: @@ -2114,7 +2153,8 @@ spec: description: Add custom OneAgent annotations. 
type: object args: - description: Set additional arguments to the OneAgent installer. + description: |- + Set additional arguments to the OneAgent installer. For available options, see Linux custom installation (https://www.dynatrace.com/support/help/setup-and-configuration/dynatrace-oneagent/installation-and-operation/linux/installation/customize-oneagent-installation-on-linux). For the list of limitations, see Limitations (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/docker/set-up-dynatrace-oneagent-as-docker-container#limitations). items: @@ -2122,8 +2162,8 @@ spec: type: array x-kubernetes-list-type: set autoUpdate: - description: Disables automatic restarts of OneAgent pods - in case a new version is available (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring#disable-auto). + description: |- + Disables automatic restarts of OneAgent pods in case a new version is available (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring#disable-auto). Enabled by default. type: boolean dnsPolicy: @@ -2142,16 +2182,16 @@ spec: be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables - in the container and any service environment variables. - If a variable cannot be resolved, the reference in - the input string will be unchanged. Double $$ are - reduced to a single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the - variable exists or not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -2164,10 +2204,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or @@ -2178,11 +2218,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
properties: apiVersion: description: Version of the schema the FieldPath @@ -2197,11 +2235,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -2231,10 +2267,10 @@ spec: from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its @@ -2266,25 +2302,28 @@ spec: nodes OneAgent will be deployed. type: object oneAgentResources: - description: 'Resource settings for OneAgent container. Consumption - of the OneAgent heavily depends on the workload to monitor. - You can use the default settings in the CR. Note: resource.requests - shows the values needed to run; resource.limits shows the - maximum limits for the pod.' + description: |- + Resource settings for OneAgent container. Consumption of the OneAgent heavily depends on the workload to monitor. You can use the default settings in the CR. + Note: resource.requests shows the values needed to run; resource.limits shows the maximum limits for the pod. properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available inside a container. type: string required: @@ -2301,8 +2340,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2311,58 +2351,55 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object priorityClassName: - description: Assign a priority class to the OneAgent pods. - By default, no class is set. For details, see Pod Priority - and Preemption (https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/). + description: |- + Assign a priority class to the OneAgent pods. By default, no class is set. + For details, see Pod Priority and Preemption (https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/). type: string tolerations: description: Tolerations to include with the OneAgent DaemonSet. For details, see Taints and Tolerations (https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -2371,9 +2408,10 @@ spec: type: string type: object cloudNativeFullStack: - description: Has a single OneAgent per node via DaemonSet. dynatrace-webhook - injects into application pods based on labeled namespaces. Has - a CSI driver per node via DaemonSet to provide binaries to pods. + description: |- + Has a single OneAgent per node via DaemonSet. + dynatrace-webhook injects into application pods based on labeled namespaces. + Has a CSI driver per node via DaemonSet to provide binaries to pods. nullable: true properties: annotations: @@ -2382,7 +2420,8 @@ spec: description: Add custom OneAgent annotations. type: object args: - description: Set additional arguments to the OneAgent installer. + description: |- + Set additional arguments to the OneAgent installer. For available options, see Linux custom installation (https://www.dynatrace.com/support/help/setup-and-configuration/dynatrace-oneagent/installation-and-operation/linux/installation/customize-oneagent-installation-on-linux). For the list of limitations, see Limitations (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/docker/set-up-dynatrace-oneagent-as-docker-container#limitations). items: @@ -2390,8 +2429,8 @@ spec: type: array x-kubernetes-list-type: set autoUpdate: - description: Disables automatic restarts of OneAgent pods - in case a new version is available (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring#disable-auto). + description: |- + Disables automatic restarts of OneAgent pods in case a new version is available (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring#disable-auto). Enabled by default. type: boolean codeModulesImage: @@ -2414,16 +2453,16 @@ spec: be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables - in the container and any service environment variables. - If a variable cannot be resolved, the reference in - the input string will be unchanged. Double $$ are - reduced to a single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the - variable exists or not. 
Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -2436,10 +2475,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or @@ -2450,11 +2489,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -2469,11 +2506,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -2503,10 +2538,10 @@ spec: from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its @@ -2526,23 +2561,28 @@ spec: to the image from the Dynatrace cluster. type: string initResources: - description: Define resources requests and limits for the - initContainer. For details, see Managing resources for containers + description: |- + Define resources requests and limits for the initContainer. For details, see Managing resources for containers (https://kubernetes.io/docs/concepts/configuration/manage-resources-containers). properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. 
- \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available inside a container. type: string required: @@ -2559,8 +2599,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2569,11 +2610,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object labels: @@ -2589,25 +2630,28 @@ spec: nodes OneAgent will be deployed. type: object oneAgentResources: - description: 'Resource settings for OneAgent container. Consumption - of the OneAgent heavily depends on the workload to monitor. - You can use the default settings in the CR. Note: resource.requests - shows the values needed to run; resource.limits shows the - maximum limits for the pod.' + description: |- + Resource settings for OneAgent container. Consumption of the OneAgent heavily depends on the workload to monitor. You can use the default settings in the CR. + Note: resource.requests shows the values needed to run; resource.limits shows the maximum limits for the pod. properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available inside a container. type: string required: @@ -2624,8 +2668,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2634,58 +2679,55 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object priorityClassName: - description: Assign a priority class to the OneAgent pods. - By default, no class is set. For details, see Pod Priority - and Preemption (https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/). + description: |- + Assign a priority class to the OneAgent pods. By default, no class is set. + For details, see Pod Priority and Preemption (https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/). type: string tolerations: description: Tolerations to include with the OneAgent DaemonSet. For details, see Taints and Tolerations (https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -2693,9 +2735,13 @@ spec: description: The OneAgent version to be used. type: string type: object + hostGroup: + description: Sets a host group for OneAgent. + type: string hostMonitoring: - description: Has a single OneAgent per node via DaemonSet. Doesn't - inject into application pods. + description: |- + Has a single OneAgent per node via DaemonSet. + Doesn't inject into application pods. nullable: true properties: annotations: @@ -2704,7 +2750,8 @@ spec: description: Add custom OneAgent annotations. type: object args: - description: Set additional arguments to the OneAgent installer. + description: |- + Set additional arguments to the OneAgent installer. For available options, see Linux custom installation (https://www.dynatrace.com/support/help/setup-and-configuration/dynatrace-oneagent/installation-and-operation/linux/installation/customize-oneagent-installation-on-linux). For the list of limitations, see Limitations (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/docker/set-up-dynatrace-oneagent-as-docker-container#limitations). items: @@ -2712,8 +2759,8 @@ spec: type: array x-kubernetes-list-type: set autoUpdate: - description: Disables automatic restarts of OneAgent pods - in case a new version is available (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring#disable-auto). 
+ description: |- + Disables automatic restarts of OneAgent pods in case a new version is available (https://www.dynatrace.com/support/help/setup-and-configuration/setup-on-container-platforms/kubernetes/get-started-with-kubernetes-monitoring#disable-auto). Enabled by default. type: boolean dnsPolicy: @@ -2732,16 +2779,16 @@ spec: be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables - in the container and any service environment variables. - If a variable cannot be resolved, the reference in - the input string will be unchanged. Double $$ are - reduced to a single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the - variable exists or not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -2754,10 +2801,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or @@ -2768,11 +2815,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -2787,11 +2832,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -2821,10 +2864,10 @@ spec: from. Must be a valid secret key. type: string name: - description: 'Name of the referent. 
More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its @@ -2856,25 +2899,28 @@ spec: nodes OneAgent will be deployed. type: object oneAgentResources: - description: 'Resource settings for OneAgent container. Consumption - of the OneAgent heavily depends on the workload to monitor. - You can use the default settings in the CR. Note: resource.requests - shows the values needed to run; resource.limits shows the - maximum limits for the pod.' + description: |- + Resource settings for OneAgent container. Consumption of the OneAgent heavily depends on the workload to monitor. You can use the default settings in the CR. + Note: resource.requests shows the values needed to run; resource.limits shows the maximum limits for the pod. properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available inside a container. type: string required: @@ -2891,8 +2937,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2901,58 +2948,55 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object priorityClassName: - description: Assign a priority class to the OneAgent pods. - By default, no class is set. For details, see Pod Priority - and Preemption (https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/). + description: |- + Assign a priority class to the OneAgent pods. By default, no class is set. + For details, see Pod Priority and Preemption (https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/). type: string tolerations: description: Tolerations to include with the OneAgent DaemonSet. For details, see Taints and Tolerations (https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
type: string type: object type: array @@ -2962,9 +3006,9 @@ spec: type: object type: object proxy: - description: 'Set custom proxy settings either directly or from a - secret with the field proxy. Note: Applies to Dynatrace Operator, - ActiveGate, and OneAgents.' + description: |- + Set custom proxy settings either directly or from a secret with the field proxy. + Note: Applies to Dynatrace Operator, ActiveGate, and OneAgents. properties: value: description: Proxy URL. It has preference over ValueFrom. @@ -2979,9 +3023,9 @@ spec: description: Configuration for Routing properties: customProperties: - description: Add a custom properties file by providing it as a - value or reference it from a secret If referenced from a secret, - make sure the key is called 'customProperties' + description: |- + Add a custom properties file by providing it as a value or reference it from a secret + If referenced from a secret, make sure the key is called 'customProperties' properties: value: description: Custom properties value. @@ -3006,15 +3050,16 @@ spec: C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. @@ -3027,9 +3072,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its @@ -3040,11 +3086,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
properties: apiVersion: description: Version of the schema the FieldPath @@ -3059,10 +3103,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -3092,9 +3135,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key @@ -3135,19 +3179,24 @@ spec: pods properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -3163,8 +3212,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -3173,51 +3223,49 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: description: Set tolerations for the ActiveGate pods items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -3229,16 +3277,17 @@ spec: matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. 
+ description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -3246,17 +3295,16 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -3268,132 +3316,134 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys - to select the pods over which spreading will be calculated. - The keys are used to lookup values from the incoming pod - labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading - will be calculated for the incoming pod. The same key - is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't - set. Keys that don't exist in the incoming pod labels - will be ignored. A null or empty list means only match - against labelSelector. \n This is a beta field and requires - the MatchLabelKeysInPodTopologySpread feature gate to - be enabled (enabled by default)." + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. 
A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). items: type: string type: array x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which pods - may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the number - of matching pods in the target topology and the global - minimum. The global minimum is the minimum number of matching - pods in an eligible domain or zero if the number of eligible - domains is less than MinDomains. For example, in a 3-zone - cluster, MaxSkew is set to 1, and pods with the same labelSelector - spread as 2/2/1: In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to - zone3 to become 2/2/2; scheduling it onto zone1(zone2) - would make the ActualSkew(3-1) on zone1(zone2) violate - MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled - onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies that - satisfy it. It''s a required field. Default value is 1 - and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number of eligible - domains. When the number of eligible domains with matching - topology keys is less than minDomains, Pod Topology Spread - treats \"global minimum\" as 0, and then the calculation - of Skew is performed. And when the number of eligible - domains with matching topology keys equals or greater - than minDomains, this value has no effect on scheduling. - As a result, when the number of eligible domains is less - than minDomains, scheduler won't schedule more than maxSkew - Pods to those domains. If value is nil, the constraint - behaves as if MinDomains is equal to 1. Valid values are - integers greater than 0. When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone cluster, - MaxSkew is set to 2, MinDomains is set to 5 and pods with - the same labelSelector spread as 2/2/2: | zone1 | zone2 - | zone3 | | P P | P P | P P | The number of domains - is less than 5(MinDomains), so \"global minimum\" is treated - as 0. 
In this situation, new pod with the same labelSelector - cannot be scheduled, because computed skew will be 3(3 - - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. \n This is a beta field and requires - the MinDomainsInPodTopologySpread feature gate to be enabled - (enabled by default)." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat - Pod's nodeAffinity/nodeSelector when calculating pod topology - spread skew. Options are: - Honor: only nodes matching - nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes - are included in the calculations. \n If this value is - nil, the behavior is equivalent to the Honor policy. This - is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat - node taints when calculating pod topology spread skew. - Options are: - Honor: nodes without taints, along with - tainted nodes for which the incoming pod has a toleration, - are included. - Ignore: node taints are ignored. All nodes - are included. \n If this value is nil, the behavior is - equivalent to the Ignore policy. This is a beta-level - feature default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. 
+ - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: - description: TopologyKey is the key of node labels. Nodes - that have a label with this key and identical values are - considered to be in the same topology. We consider each - as a "bucket", and try to put balanced number - of pods into each bucket. We define a domain as a particular - instance of a topology. Also, we define an eligible domain - as a domain whose nodes meet the requirements of nodeAffinityPolicy - and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain - of that topology. It's a required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with - a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule - it. - ScheduleAnyway tells the scheduler to schedule the - pod in any location, but giving higher precedence to topologies - that would help reduce the skew. A constraint is considered - "Unsatisfiable" for an incoming pod if and only if every - possible node assignment for that pod would violate "MaxSkew" - on some topology. For example, in a 3-zone cluster, MaxSkew - is set to 1, and pods with the same labelSelector spread - as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming - pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) - as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). - In other words, the cluster can still be imbalanced, but - scheduler won''t make it *more* imbalanced. It''s a required - field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. 
+ It's a required field. type: string required: - maxSkew @@ -3403,18 +3453,18 @@ spec: type: array type: object skipCertCheck: - description: Disable certificate check for the connection between - Dynatrace Operator and the Dynatrace Cluster. Set to true if you - want to skip certification validation checks. + description: |- + Disable certificate check for the connection between Dynatrace Operator and the Dynatrace Cluster. + Set to true if you want to skip certification validation checks. type: boolean tokens: description: Name of the secret holding the tokens used for connecting to Dynatrace. type: string trustedCAs: - description: 'Adds custom RootCAs from a configmap. Put the certificate - under certs within your configmap. Note: Applies only to Dynatrace - Operator and OneAgent, not to ActiveGate.' + description: |- + Adds custom RootCAs from a configmap. Put the certificate under certs within your configmap. + Note: Applies only to Dynatrace Operator and OneAgent, not to ActiveGate. type: string required: - apiUrl @@ -3485,42 +3535,42 @@ spec: the instance items: description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. 
+ description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -3534,11 +3584,12 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -3628,6 +3679,10 @@ spec: performed format: date-time type: string + lastProcessModuleConfigUpdate: + description: Time of the last process module config update + format: date-time + type: string source: description: Source of the image (tenant-registry, public-registry, ...) @@ -3681,7 +3736,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: edgeconnects.dynatrace.com spec: group: dynatrace.com @@ -3710,19 +3765,24 @@ spec: description: EdgeConnect is the Schema for the EdgeConnect API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: EdgeConnectSpec defines the desired state of EdgeConnect + description: EdgeConnectSpec defines the desired state of EdgeConnect. properties: annotations: additionalProperties: @@ -3752,15 +3812,16 @@ spec: description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. Cannot @@ -3773,8 +3834,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its key @@ -3785,10 +3848,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath is @@ -3803,10 +3865,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' 
+ description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -3835,8 +3896,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -3918,18 +3981,24 @@ spec: description: Defines resources requests and limits for single pods properties: claims: - description: "Claims lists the names of resources, defined in - spec.resourceClaims, that are used by this container. \n This - is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be set - for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in pod.spec.resourceClaims - of the Pod where this field is used. It makes that resource - available inside a container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -3945,8 +4014,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -3955,50 +4025,49 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: description: Sets tolerations for the EdgeConnect pods items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -4010,33 +4079,34 @@ spec: pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching pods. Pods - that match this label selector are counted to determine the - number of pods in their corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -4048,126 +4118,134 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select - the pods over which spreading will be calculated. The keys - are used to lookup values from the incoming pod labels, those - key-value labels are ANDed with labelSelector to select the - group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in - both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot - be set when LabelSelector isn't set. Keys that don't exist - in the incoming pod labels will be ignored. A null or empty - list means only match against labelSelector. \n This is a - beta field and requires the MatchLabelKeysInPodTopologySpread - feature gate to be enabled (enabled by default)." + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). 
items: type: string type: array x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which pods may - be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the number - of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods - in an eligible domain or zero if the number of eligible domains - is less than MinDomains. For example, in a 3-zone cluster, - MaxSkew is set to 1, and pods with the same labelSelector - spread as 2/2/1: In this case, the global minimum is 1. | - zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew - is 1, incoming pod can only be scheduled to zone3 to become - 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) - on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming - pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies that satisfy - it. It''s a required field. Default value is 1 and 0 is not - allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number of eligible - domains. When the number of eligible domains with matching - topology keys is less than minDomains, Pod Topology Spread - treats \"global minimum\" as 0, and then the calculation of - Skew is performed. And when the number of eligible domains - with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. As a result, when - the number of eligible domains is less than minDomains, scheduler - won't schedule more than maxSkew Pods to those domains. If - value is nil, the constraint behaves as if MinDomains is equal - to 1. Valid values are integers greater than 0. When value - is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For - example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains - is set to 5 and pods with the same labelSelector spread as - 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | - The number of domains is less than 5(MinDomains), so \"global - minimum\" is treated as 0. In this situation, new pod with - the same labelSelector cannot be scheduled, because computed - skew will be 3(3 - 0) if new Pod is scheduled to any of the - three zones, it will violate MaxSkew. \n This is a beta field - and requires the MinDomainsInPodTopologySpread feature gate - to be enabled (enabled by default)." 
+ description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat - Pod's nodeAffinity/nodeSelector when calculating pod topology - spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector - are included in the calculations. - Ignore: nodeAffinity/nodeSelector - are ignored. All nodes are included in the calculations. \n - If this value is nil, the behavior is equivalent to the Honor - policy. This is a beta-level feature default enabled by the - NodeInclusionPolicyInPodTopologySpread feature flag." + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node - taints when calculating pod topology spread skew. Options - are: - Honor: nodes without taints, along with tainted nodes - for which the incoming pod has a toleration, are included. + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. - \n If this value is nil, the behavior is equivalent to the - Ignore policy. This is a beta-level feature default enabled - by the NodeInclusionPolicyInPodTopologySpread feature flag." + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: - description: TopologyKey is the key of node labels. 
Nodes that - have a label with this key and identical values are considered - to be in the same topology. We consider each - as a "bucket", and try to put balanced number of pods into - each bucket. We define a domain as a particular instance of - a topology. Also, we define an eligible domain as a domain - whose nodes meet the requirements of nodeAffinityPolicy and - nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain of - that topology. It's a required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal with a - pod if it doesn''t satisfy the spread constraint. - DoNotSchedule - (default) tells the scheduler not to schedule it. - ScheduleAnyway - tells the scheduler to schedule the pod in any location, but - giving higher precedence to topologies that would help reduce - the skew. A constraint is considered "Unsatisfiable" for an - incoming pod if and only if every possible node assignment - for that pod would violate "MaxSkew" on some topology. For - example, in a 3-zone cluster, MaxSkew is set to 1, and pods - with the same labelSelector spread as 3/1/1: | zone1 | zone2 - | zone3 | | P P P | P | P | If WhenUnsatisfiable is - set to DoNotSchedule, incoming pod can only be scheduled to - zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on - zone2(zone3) satisfies MaxSkew(1). In other words, the cluster - can still be imbalanced, but scheduler won''t make it *more* - imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -4180,49 +4258,49 @@ spec: - oauth type: object status: - description: EdgeConnectStatus defines the observed state of EdgeConnect + description: EdgeConnectStatus defines the observed state of EdgeConnect. 
properties: conditions: description: Conditions includes status about the current state of the instance items: description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
This field may not be empty. maxLength: 1024 minLength: 1 @@ -4236,11 +4314,12 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/charts/dynatrace/dynatrace-operator/templates/Common/csi/daemonset.yaml b/charts/dynatrace/dynatrace-operator/templates/Common/csi/daemonset.yaml index 687950eb0..79dc013a5 100644 --- a/charts/dynatrace/dynatrace-operator/templates/Common/csi/daemonset.yaml +++ b/charts/dynatrace/dynatrace-operator/templates/Common/csi/daemonset.yaml @@ -102,7 +102,6 @@ spec: periodSeconds: 5 successThreshold: 1 timeoutSeconds: 1 - {{- include "dynatrace-operator.startupProbe" . | nindent 8 }} ports: - containerPort: 10080 name: livez @@ -142,6 +141,7 @@ spec: - name: MAX_UNMOUNTED_VOLUME_AGE value: "{{ .Values.csidriver.maxUnmountedVolumeAge}}" {{- end }} + {{- include "dynatrace-operator.startupProbe" . | nindent 8 }} livenessProbe: failureThreshold: 3 httpGet: diff --git a/charts/dynatrace/dynatrace-operator/templates/Common/operator/clusterrole-operator.yaml b/charts/dynatrace/dynatrace-operator/templates/Common/operator/clusterrole-operator.yaml index c3a274d47..d5c055578 100644 --- a/charts/dynatrace/dynatrace-operator/templates/Common/operator/clusterrole-operator.yaml +++ b/charts/dynatrace/dynatrace-operator/templates/Common/operator/clusterrole-operator.yaml @@ -16,7 +16,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ .Release.Name }} + name: dynatrace-operator labels: {{- include "dynatrace-operator.operatorLabels" . | nindent 4 }} rules: @@ -50,7 +50,7 @@ rules: resourceNames: - dynatrace-dynakube-config - dynatrace-data-ingest-endpoint - - dynatrace-activegate-internal-proxy + - dynatrace-internal-proxy verbs: - get - update @@ -87,6 +87,7 @@ rules: - customresourcedefinitions resourceNames: - dynakubes.dynatrace.com + - edgeconnects.dynatrace.com verbs: - get - update @@ -105,15 +106,15 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ .Release.Name }} + name: dynatrace-operator labels: {{- include "dynatrace-operator.operatorLabels" . 
| nindent 4 }} subjects: - kind: ServiceAccount - name: {{ .Release.Name }} + name: dynatrace-operator namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole - name: {{ .Release.Name }} + name: dynatrace-operator apiGroup: rbac.authorization.k8s.io {{ end }} diff --git a/charts/dynatrace/dynatrace-operator/templates/Common/operator/deployment-operator.yaml b/charts/dynatrace/dynatrace-operator/templates/Common/operator/deployment-operator.yaml index 60663188f..4b92b2c5e 100644 --- a/charts/dynatrace/dynatrace-operator/templates/Common/operator/deployment-operator.yaml +++ b/charts/dynatrace/dynatrace-operator/templates/Common/operator/deployment-operator.yaml @@ -16,7 +16,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Release.Name }} + name: dynatrace-operator namespace: {{ .Release.Namespace }} labels: {{- include "dynatrace-operator.operatorLabels" . | nindent 4 }} @@ -36,7 +36,7 @@ spec: annotations: dynatrace.com/inject: "false" {{- if (.Values.operator).apparmor}} - container.apparmor.security.beta.kubernetes.io/{{ .Release.Name }}: runtime/default + container.apparmor.security.beta.kubernetes.io/operator: runtime/default {{- end }} {{- if .Values.operator.annotations }} {{- toYaml .Values.operator.annotations | nindent 8 }} @@ -49,7 +49,7 @@ spec: {{- end }} spec: containers: - - name: {{ .Release.Name }} + - name: operator args: - operator # Replace this with the built image name @@ -105,7 +105,7 @@ spec: volumes: - emptyDir: { } name: tmp-cert-dir - serviceAccountName: {{ .Release.Name }} + serviceAccountName: dynatrace-operator securityContext: {{- toYaml .Values.operator.podSecurityContext | nindent 8 }} {{- if .Values.customPullSecret }} diff --git a/charts/dynatrace/dynatrace-operator/templates/Common/operator/role-operator.yaml b/charts/dynatrace/dynatrace-operator/templates/Common/operator/role-operator.yaml index ce63934b7..b3f63c5e2 100644 --- a/charts/dynatrace/dynatrace-operator/templates/Common/operator/role-operator.yaml +++ b/charts/dynatrace/dynatrace-operator/templates/Common/operator/role-operator.yaml @@ -16,7 +16,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ .Release.Name }} + name: dynatrace-operator namespace: {{ .Release.Namespace }} labels: {{- include "dynatrace-operator.operatorLabels" . | nindent 4 }} @@ -172,15 +172,15 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ .Release.Name }} + name: dynatrace-operator namespace: {{ .Release.Namespace }} labels: {{- include "dynatrace-operator.operatorLabels" . | nindent 4 }} subjects: - kind: ServiceAccount - name: {{ .Release.Name }} + name: dynatrace-operator roleRef: kind: Role - name: {{ .Release.Name }} + name: dynatrace-operator apiGroup: rbac.authorization.k8s.io {{ end }} diff --git a/charts/dynatrace/dynatrace-operator/templates/Common/operator/serviceaccount-operator.yaml b/charts/dynatrace/dynatrace-operator/templates/Common/operator/serviceaccount-operator.yaml index 8a261d722..30705324e 100644 --- a/charts/dynatrace/dynatrace-operator/templates/Common/operator/serviceaccount-operator.yaml +++ b/charts/dynatrace/dynatrace-operator/templates/Common/operator/serviceaccount-operator.yaml @@ -16,7 +16,7 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ .Release.Name }} + name: dynatrace-operator namespace: {{ .Release.Namespace }} labels: {{- include "dynatrace-operator.operatorLabels" . 
| nindent 4 }} diff --git a/charts/dynatrace/dynatrace-operator/templates/NOTES.txt b/charts/dynatrace/dynatrace-operator/templates/NOTES.txt index 8ff8ac567..48d4f09fc 100644 --- a/charts/dynatrace/dynatrace-operator/templates/NOTES.txt +++ b/charts/dynatrace/dynatrace-operator/templates/NOTES.txt @@ -7,4 +7,4 @@ https://github.com/Dynatrace/dynatrace-operator To verify the current state of the deployments, try: $ kubectl get pods -n {{ .Release.Namespace }} - $ kubectl logs -f deployment/{{ .Release.Name }} -n {{ .Release.Namespace }} \ No newline at end of file + $ kubectl logs -f deployment/dynatrace-operator -n {{ .Release.Namespace }} diff --git a/charts/dynatrace/dynatrace-operator/templates/_helpers.tpl b/charts/dynatrace/dynatrace-operator/templates/_helpers.tpl index 1d2b51624..ad40fde3d 100644 --- a/charts/dynatrace/dynatrace-operator/templates/_helpers.tpl +++ b/charts/dynatrace/dynatrace-operator/templates/_helpers.tpl @@ -31,8 +31,10 @@ Check if default image or imageref is used {{- .Values.imageRef.tag | default (printf "v%s" .Chart.AppVersion) | printf "%s:%s" .Values.imageRef.repository -}} {{- else if eq (include "dynatrace-operator.platform" .) "google-marketplace" -}} {{- printf "%s:%s" "gcr.io/dynatrace-marketplace-prod/dynatrace-operator" .Chart.AppVersion }} + {{- else if eq (include "dynatrace-operator.platform" .) "azure-marketplace" -}} + {{- printf "%s/%s@%s" .Values.global.azure.images.operator.registry .Values.global.azure.images.operator.image .Values.global.azure.images.operator.digest }} {{- else -}} - {{- printf "%s:v%s" "docker.io/dynatrace/dynatrace-operator" .Chart.AppVersion }} + {{- printf "%s:v%s" "public.ecr.aws/dynatrace/dynatrace-operator" .Chart.AppVersion }} {{- end -}} {{- end -}} {{- end -}} diff --git a/charts/dynatrace/dynatrace-operator/templates/_labels.tpl b/charts/dynatrace/dynatrace-operator/templates/_labels.tpl index a41dbe092..e66473db0 100644 --- a/charts/dynatrace/dynatrace-operator/templates/_labels.tpl +++ b/charts/dynatrace/dynatrace-operator/templates/_labels.tpl @@ -16,7 +16,7 @@ Selector labels */}} {{- define "dynatrace-operator.futureSelectorLabels" -}} -app.kubernetes.io/name: {{ .Release.Name }} +app.kubernetes.io/name: dynatrace-operator {{- if not (.Values).manifests }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end -}} @@ -33,6 +33,9 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- if not (.Values).manifests }} helm.sh/chart: {{ include "dynatrace-operator.chart" . }} {{- end -}} +{{- if eq (include "dynatrace-operator.platform" .) "azure-marketplace" }} +azure-extensions-usage-release-identifier: {{ .Release.Name | quote }} +{{- end -}} {{- end -}} {{/* diff --git a/charts/dynatrace/dynatrace-operator/templates/_platform.tpl b/charts/dynatrace/dynatrace-operator/templates/_platform.tpl index c8a862e44..0958774cd 100644 --- a/charts/dynatrace/dynatrace-operator/templates/_platform.tpl +++ b/charts/dynatrace/dynatrace-operator/templates/_platform.tpl @@ -40,7 +40,7 @@ Exclude Kubernetes manifest not running on OLM Check if platform is set to a valid one */}} {{- define "dynatrace-operator.platformIsValid" -}} -{{- $validPlatforms := list "kubernetes" "openshift" "google-marketplace" "gke-autopilot" -}} +{{- $validPlatforms := list "kubernetes" "openshift" "google-marketplace" "gke-autopilot" "azure-marketplace" -}} {{- if has (include "dynatrace-operator.platform" .) 
$validPlatforms -}} {{ default "set" }} {{- end -}} diff --git a/charts/dynatrace/dynatrace-operator/templates/application.yaml b/charts/dynatrace/dynatrace-operator/templates/application.yaml index 5cd4dbe67..1dd17410d 100644 --- a/charts/dynatrace/dynatrace-operator/templates/application.yaml +++ b/charts/dynatrace/dynatrace-operator/templates/application.yaml @@ -61,7 +61,7 @@ spec: url: https://www.dynatrace.com/technologies/kubernetes-monitoring selector: matchLabels: - app.kubernetes.io/name: "{{ .Release.Name }}" + app.kubernetes.io/name: dynatrace-operator componentKinds: - group: apps/v1 kind: DaemonSet diff --git a/charts/dynatrace/dynatrace-operator/values.yaml b/charts/dynatrace/dynatrace-operator/values.yaml index 576ec4d8e..2619f7f63 100644 --- a/charts/dynatrace/dynatrace-operator/values.yaml +++ b/charts/dynatrace/dynatrace-operator/values.yaml @@ -25,7 +25,7 @@ imageRef: tag: "" #defaults to chart version customPullSecret: "" -installCRD: false +installCRD: true operator: nodeSelector: {} @@ -87,10 +87,10 @@ webhook: validatingWebhook: timeoutSeconds: 10 mutatingWebhook: - timeoutSeconds: 2 + timeoutSeconds: 10 csidriver: - enabled: false + enabled: true nodeSelector: {} kubeletPath: "/var/lib/kubelet" existingPriorityClassName: "" # if defined, use this priorityclass instead of creating a new one diff --git a/charts/external-secrets/external-secrets/Chart.yaml b/charts/external-secrets/external-secrets/Chart.yaml index 66eb5465d..b6b2ae5eb 100644 --- a/charts/external-secrets/external-secrets/Chart.yaml +++ b/charts/external-secrets/external-secrets/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.19.0-0' catalog.cattle.io/release-name: external-secrets apiVersion: v2 -appVersion: v0.9.13 +appVersion: v0.9.14 description: External secret management for Kubernetes home: https://github.com/external-secrets/external-secrets icon: https://raw.githubusercontent.com/external-secrets/external-secrets/main/assets/eso-logo-large.png @@ -17,4 +17,4 @@ maintainers: name: mcavoyk name: external-secrets type: application -version: 0.9.13 +version: 0.9.14 diff --git a/charts/external-secrets/external-secrets/README.md b/charts/external-secrets/external-secrets/README.md index d65d82a64..4a3f140b1 100644 --- a/charts/external-secrets/external-secrets/README.md +++ b/charts/external-secrets/external-secrets/README.md @@ -1,10 +1,10 @@ # External Secrets -

[external-secrets logo image]
[//]: # (README.md generated by gotmpl. DO NOT EDIT.) -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.13](https://img.shields.io/badge/Version-0.9.13-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.14](https://img.shields.io/badge/Version-0.9.14-informational?style=flat-square) External secret management for Kubernetes @@ -98,6 +98,10 @@ The command removes all the Kubernetes components associated with the chart and | extraVolumeMounts | list | `[]` | | | extraVolumes | list | `[]` | | | fullnameOverride | string | `""` | | +| global.affinity | object | `{}` | | +| global.nodeSelector | object | `{}` | | +| global.tolerations | list | `[]` | | +| global.topologySpreadConstraints | list | `[]` | | | hostNetwork | bool | `false` | Run the controller on the host network | | image.flavour | string | `""` | The flavour of tag you want to use There are different image flavours available, like distroless and ubi. Please see GitHub release notes for image tags for these flavors. By default the distroless image is used. | | image.pullPolicy | string | `"IfNotPresent"` | | @@ -111,6 +115,7 @@ The command removes all the Kubernetes components associated with the chart and | metrics.service.enabled | bool | `false` | Enable if you use another monitoring tool than Prometheus to scrape the metrics | | metrics.service.port | int | `8080` | Metrics service port to scrape | | nameOverride | string | `""` | | +| namespaceOverride | string | `""` | | | nodeSelector | object | `{}` | | | podAnnotations | object | `{}` | Annotations to add to Pod | | podDisruptionBudget | object | `{"enabled":false,"minAvailable":1}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | diff --git a/charts/external-secrets/external-secrets/README.md.gotmpl b/charts/external-secrets/external-secrets/README.md.gotmpl index 7c1b60d23..75a583c33 100644 --- a/charts/external-secrets/external-secrets/README.md.gotmpl +++ b/charts/external-secrets/external-secrets/README.md.gotmpl @@ -2,7 +2,7 @@ {{- $org := "external-secrets" -}} # External Secrets -

[external-secrets logo image]
[//]: # (README.md generated by gotmpl. DO NOT EDIT.) diff --git a/charts/external-secrets/external-secrets/templates/NOTES.txt b/charts/external-secrets/external-secrets/templates/NOTES.txt index 2887d22be..ffa0fc7e1 100644 --- a/charts/external-secrets/external-secrets/templates/NOTES.txt +++ b/charts/external-secrets/external-secrets/templates/NOTES.txt @@ -1,8 +1,7 @@ -external-secrets has been deployed successfully! +external-secrets has been deployed successfully in namespace {{ template "external-secrets.namespace" . }}! In order to begin using ExternalSecrets, you will need to set up a SecretStore or ClusterSecretStore resource (for example, by creating a 'vault' SecretStore). More information on the different types of SecretStores and how to configure them can be found in our Github: {{ .Chart.Home }} - diff --git a/charts/external-secrets/external-secrets/templates/_helpers.tpl b/charts/external-secrets/external-secrets/templates/_helpers.tpl index 5b0f306b0..54b482cc7 100644 --- a/charts/external-secrets/external-secrets/templates/_helpers.tpl +++ b/charts/external-secrets/external-secrets/templates/_helpers.tpl @@ -23,6 +23,17 @@ If release name contains chart name it will be used as a full name. {{- end }} {{- end }} +{{/* +Define namespace of chart, useful for multi-namespace deployments +*/}} +{{- define "external-secrets.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + {{/* Create chart name and version as used by the chart label. */}} diff --git a/charts/external-secrets/external-secrets/templates/cert-controller-deployment.yaml b/charts/external-secrets/external-secrets/templates/cert-controller-deployment.yaml index 31949bcb6..54974f47f 100644 --- a/charts/external-secrets/external-secrets/templates/cert-controller-deployment.yaml +++ b/charts/external-secrets/external-secrets/templates/cert-controller-deployment.yaml @@ -3,7 +3,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "external-secrets.fullname" . }}-cert-controller - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-cert-controller.labels" . | nindent 4 }} {{- with .Values.certController.deploymentAnnotations }} @@ -51,9 +51,9 @@ spec: - certcontroller - --crd-requeue-interval={{ .Values.certController.requeueInterval }} - --service-name={{ include "external-secrets.fullname" . }}-webhook - - --service-namespace={{ .Release.Namespace }} + - --service-namespace={{ template "external-secrets.namespace" . }} - --secret-name={{ include "external-secrets.fullname" . }}-webhook - - --secret-namespace={{ .Release.Namespace }} + - --secret-namespace={{ template "external-secrets.namespace" . }} - --metrics-addr=:{{ .Values.certController.metrics.listen.port }} - --healthz-addr={{ .Values.certController.readinessProbe.address }}:{{ .Values.certController.readinessProbe.port }} {{ if not .Values.crds.createClusterSecretStore -}} @@ -93,19 +93,19 @@ spec: volumes: {{- toYaml .Values.certController.extraVolumes | nindent 8 }} {{- end }} - {{- with .Values.certController.nodeSelector }} + {{- with .Values.certController.nodeSelector | default .Values.global.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.certController.affinity }} + {{- with .Values.certController.affinity | default .Values.global.affinity }} affinity: {{- toYaml . 
| nindent 8 }} {{- end }} - {{- with .Values.certController.tolerations }} + {{- with .Values.certController.tolerations | default .Values.global.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.certController.topologySpreadConstraints }} + {{- with .Values.certController.topologySpreadConstraints | default .Values.global.topologySpreadConstraints }} topologySpreadConstraints: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/charts/external-secrets/external-secrets/templates/cert-controller-poddisruptionbudget.yaml b/charts/external-secrets/external-secrets/templates/cert-controller-poddisruptionbudget.yaml index 5eca1a93e..e61cb8ebc 100644 --- a/charts/external-secrets/external-secrets/templates/cert-controller-poddisruptionbudget.yaml +++ b/charts/external-secrets/external-secrets/templates/cert-controller-poddisruptionbudget.yaml @@ -3,7 +3,7 @@ apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: {{ include "external-secrets.fullname" . }}-cert-controller-pdb - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-cert-controller.labels" . | nindent 4 }} spec: diff --git a/charts/external-secrets/external-secrets/templates/cert-controller-rbac.yaml b/charts/external-secrets/external-secrets/templates/cert-controller-rbac.yaml index 62dbe3fae..43c2306a6 100644 --- a/charts/external-secrets/external-secrets/templates/cert-controller-rbac.yaml +++ b/charts/external-secrets/external-secrets/templates/cert-controller-rbac.yaml @@ -73,6 +73,6 @@ roleRef: name: {{ include "external-secrets.fullname" . }}-cert-controller subjects: - name: {{ include "external-secrets-cert-controller.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} kind: ServiceAccount {{- end }} diff --git a/charts/external-secrets/external-secrets/templates/cert-controller-service.yaml b/charts/external-secrets/external-secrets/templates/cert-controller-service.yaml index 570dc041f..41309e889 100644 --- a/charts/external-secrets/external-secrets/templates/cert-controller-service.yaml +++ b/charts/external-secrets/external-secrets/templates/cert-controller-service.yaml @@ -3,6 +3,7 @@ apiVersion: v1 kind: Service metadata: name: {{ include "external-secrets.fullname" . }}-cert-controller-metrics + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . | nindent 4 }} {{- with .Values.metrics.service.annotations }} diff --git a/charts/external-secrets/external-secrets/templates/cert-controller-serviceaccount.yaml b/charts/external-secrets/external-secrets/templates/cert-controller-serviceaccount.yaml index 4fb0644fc..6a36f9d71 100644 --- a/charts/external-secrets/external-secrets/templates/cert-controller-serviceaccount.yaml +++ b/charts/external-secrets/external-secrets/templates/cert-controller-serviceaccount.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "external-secrets-cert-controller.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-cert-controller.labels" . 
| nindent 4 }} {{- with .Values.certController.serviceAccount.extraLabels }} diff --git a/charts/external-secrets/external-secrets/templates/crds/clustersecretstore.yaml b/charts/external-secrets/external-secrets/templates/crds/clustersecretstore.yaml index c434136cc..128b634e5 100644 --- a/charts/external-secrets/external-secrets/templates/crds/clustersecretstore.yaml +++ b/charts/external-secrets/external-secrets/templates/crds/clustersecretstore.yaml @@ -905,6 +905,46 @@ spec: - region - vault type: object + passworddepot: + description: Configures a store to sync secrets with a Password Depot instance. + properties: + auth: + description: Auth configures how secret-manager authenticates with a Password Depot instance. + properties: + secretRef: + properties: + credentials: + description: Username / Password is used for authentication. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + type: object + required: + - secretRef + type: object + database: + description: Database to use as source + type: string + host: + description: URL configures the Password Depot instance URL. + type: string + required: + - auth + - database + - host + type: object vault: description: Vault configures this store to sync secrets using Hashi provider properties: @@ -2169,6 +2209,11 @@ spec: properties: account: type: string + hostId: + description: |- + Optional HostID for JWT authentication. This may be used depending + on how the Conjur JWT authenticator policy is configured. + type: string secretRef: description: |- Optional SecretRef that refers to a key in a Secret resource containing JWT token to @@ -2412,6 +2457,34 @@ spec: required: - data type: object + fortanix: + description: Fortanix configures this store to sync secrets using the Fortanix provider + properties: + apiKey: + description: APIKey is the API token to access SDKMS Applications. + properties: + secretRef: + description: SecretRef is a reference to a secret containing the SDKMS API Key. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + type: object + apiUrl: + description: APIURL is the URL of SDKMS API. Defaults to `sdkms.fortanix.com`. 
+ type: string + type: object gcpsm: description: GCPSM configures this store to sync secrets using Google Cloud Platform Secret Manager provider properties: @@ -2743,6 +2816,70 @@ spec: required: - auth type: object + onboardbase: + description: Onboardbase configures this store to sync secrets using the Onboardbase provider + properties: + apiHost: + default: https://public.onboardbase.com/api/v1/ + description: APIHost use this to configure the host url for the API for selfhosted installation, default is https://public.onboardbase.com/api/v1/ + type: string + auth: + description: Auth configures how the Operator authenticates with the Onboardbase API + properties: + apiKeyRef: + description: |- + OnboardbaseAPIKey is the APIKey generated by an admin account. + It is used to recognize and authorize access to a project and environment within onboardbase + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + passcodeRef: + description: OnboardbasePasscode is the passcode attached to the API Key + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + required: + - apiKeyRef + - passcodeRef + type: object + environment: + default: development + description: Environment is the name of an environmnent within a project to pull the secrets from + type: string + project: + default: development + description: Project is an onboardbase project that the secrets should be pulled from + type: string + required: + - apiHost + - auth + - environment + - project + type: object onepassword: description: OnePassword configures this store to sync secrets using the 1Password Cloud provider properties: @@ -2903,6 +3040,46 @@ spec: - region - vault type: object + passworddepot: + description: Configures a store to sync secrets with a Password Depot instance. + properties: + auth: + description: Auth configures how secret-manager authenticates with a Password Depot instance. + properties: + secretRef: + properties: + credentials: + description: Username / Password is used for authentication. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. 
+ type: string + type: object + type: object + required: + - secretRef + type: object + database: + description: Database to use as source + type: string + host: + description: URL configures the Password Depot instance URL. + type: string + required: + - auth + - database + - host + type: object pulumi: description: Pulumi configures this store to sync secrets using the Pulumi provider properties: @@ -3475,6 +3652,14 @@ spec: - path - username type: object + namespace: + description: |- + Name of the vault namespace to authenticate to. This can be different than the namespace your secret is in. + Namespaces is a set of features within Vault Enterprise that allows + Vault environments to support Secure Multi-tenancy. e.g: "ns1". + More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces + This will default to Vault.Namespace field if set, or empty otherwise + type: string tokenSecretRef: description: TokenSecretRef authenticates with Vault by presenting a token. properties: diff --git a/charts/external-secrets/external-secrets/templates/crds/pushsecret.yaml b/charts/external-secrets/external-secrets/templates/crds/pushsecret.yaml index 42b45dcde..7df9d810f 100644 --- a/charts/external-secrets/external-secrets/templates/crds/pushsecret.yaml +++ b/charts/external-secrets/external-secrets/templates/crds/pushsecret.yaml @@ -266,6 +266,13 @@ spec: type: type: string type: object + updatePolicy: + default: Replace + description: 'UpdatePolicy to handle Secrets in the provider. Possible Values: "Replace/IfNotExists". Defaults to "Replace".' + enum: + - Replace + - IfNotExists + type: string required: - secretStoreRefs - selector @@ -335,7 +342,9 @@ spec: - match type: object type: object - description: Synced Push Secrets for later deletion. Matches Secret Stores to PushSecretData that was stored to that secretStore. + description: |- + Synced PushSecrets, including secrets that already exist in provider. + Matches secret stores to PushSecretData that was stored to that secret store. type: object syncedResourceVersion: description: SyncedResourceVersion keeps track of the last synced version. diff --git a/charts/external-secrets/external-secrets/templates/crds/secretstore.yaml b/charts/external-secrets/external-secrets/templates/crds/secretstore.yaml index afc8d7a01..e08eaaff7 100644 --- a/charts/external-secrets/external-secrets/templates/crds/secretstore.yaml +++ b/charts/external-secrets/external-secrets/templates/crds/secretstore.yaml @@ -905,6 +905,46 @@ spec: - region - vault type: object + passworddepot: + description: Configures a store to sync secrets with a Password Depot instance. + properties: + auth: + description: Auth configures how secret-manager authenticates with a Password Depot instance. + properties: + secretRef: + properties: + credentials: + description: Username / Password is used for authentication. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. 
+ type: string + type: object + type: object + required: + - secretRef + type: object + database: + description: Database to use as source + type: string + host: + description: URL configures the Password Depot instance URL. + type: string + required: + - auth + - database + - host + type: object vault: description: Vault configures this store to sync secrets using Hashi provider properties: @@ -2169,6 +2209,11 @@ spec: properties: account: type: string + hostId: + description: |- + Optional HostID for JWT authentication. This may be used depending + on how the Conjur JWT authenticator policy is configured. + type: string secretRef: description: |- Optional SecretRef that refers to a key in a Secret resource containing JWT token to @@ -2412,6 +2457,34 @@ spec: required: - data type: object + fortanix: + description: Fortanix configures this store to sync secrets using the Fortanix provider + properties: + apiKey: + description: APIKey is the API token to access SDKMS Applications. + properties: + secretRef: + description: SecretRef is a reference to a secret containing the SDKMS API Key. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + type: object + apiUrl: + description: APIURL is the URL of SDKMS API. Defaults to `sdkms.fortanix.com`. + type: string + type: object gcpsm: description: GCPSM configures this store to sync secrets using Google Cloud Platform Secret Manager provider properties: @@ -2743,6 +2816,70 @@ spec: required: - auth type: object + onboardbase: + description: Onboardbase configures this store to sync secrets using the Onboardbase provider + properties: + apiHost: + default: https://public.onboardbase.com/api/v1/ + description: APIHost use this to configure the host url for the API for selfhosted installation, default is https://public.onboardbase.com/api/v1/ + type: string + auth: + description: Auth configures how the Operator authenticates with the Onboardbase API + properties: + apiKeyRef: + description: |- + OnboardbaseAPIKey is the APIKey generated by an admin account. + It is used to recognize and authorize access to a project and environment within onboardbase + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + passcodeRef: + description: OnboardbasePasscode is the passcode attached to the API Key + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. 
Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + required: + - apiKeyRef + - passcodeRef + type: object + environment: + default: development + description: Environment is the name of an environmnent within a project to pull the secrets from + type: string + project: + default: development + description: Project is an onboardbase project that the secrets should be pulled from + type: string + required: + - apiHost + - auth + - environment + - project + type: object onepassword: description: OnePassword configures this store to sync secrets using the 1Password Cloud provider properties: @@ -2903,6 +3040,46 @@ spec: - region - vault type: object + passworddepot: + description: Configures a store to sync secrets with a Password Depot instance. + properties: + auth: + description: Auth configures how secret-manager authenticates with a Password Depot instance. + properties: + secretRef: + properties: + credentials: + description: Username / Password is used for authentication. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + type: object + required: + - secretRef + type: object + database: + description: Database to use as source + type: string + host: + description: URL configures the Password Depot instance URL. + type: string + required: + - auth + - database + - host + type: object pulumi: description: Pulumi configures this store to sync secrets using the Pulumi provider properties: @@ -3475,6 +3652,14 @@ spec: - path - username type: object + namespace: + description: |- + Name of the vault namespace to authenticate to. This can be different than the namespace your secret is in. + Namespaces is a set of features within Vault Enterprise that allows + Vault environments to support Secure Multi-tenancy. e.g: "ns1". + More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces + This will default to Vault.Namespace field if set, or empty otherwise + type: string tokenSecretRef: description: TokenSecretRef authenticates with Vault by presenting a token. properties: diff --git a/charts/external-secrets/external-secrets/templates/crds/vaultdynamicsecret.yaml b/charts/external-secrets/external-secrets/templates/crds/vaultdynamicsecret.yaml index bdd9c4161..381b2318c 100644 --- a/charts/external-secrets/external-secrets/templates/crds/vaultdynamicsecret.yaml +++ b/charts/external-secrets/external-secrets/templates/crds/vaultdynamicsecret.yaml @@ -476,6 +476,14 @@ spec: - path - username type: object + namespace: + description: |- + Name of the vault namespace to authenticate to. This can be different than the namespace your secret is in. + Namespaces is a set of features within Vault Enterprise that allows + Vault environments to support Secure Multi-tenancy. e.g: "ns1". 
+ More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces + This will default to Vault.Namespace field if set, or empty otherwise + type: string tokenSecretRef: description: TokenSecretRef authenticates with Vault by presenting a token. properties: diff --git a/charts/external-secrets/external-secrets/templates/deployment.yaml b/charts/external-secrets/external-secrets/templates/deployment.yaml index 3dafc2c9d..c0584c53c 100644 --- a/charts/external-secrets/external-secrets/templates/deployment.yaml +++ b/charts/external-secrets/external-secrets/templates/deployment.yaml @@ -3,7 +3,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "external-secrets.fullname" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . | nindent 4 }} {{- with .Values.deploymentAnnotations }} @@ -114,19 +114,19 @@ spec: volumes: {{- toYaml .Values.extraVolumes | nindent 8 }} {{- end }} - {{- with .Values.nodeSelector }} + {{- with .Values.nodeSelector | default .Values.global.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.affinity }} + {{- with .Values.affinity | default .Values.global.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.tolerations }} + {{- with .Values.tolerations | default .Values.global.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.topologySpreadConstraints }} + {{- with .Values.topologySpreadConstraints | default .Values.global.topologySpreadConstraints }} topologySpreadConstraints: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/charts/external-secrets/external-secrets/templates/poddisruptionbudget.yaml b/charts/external-secrets/external-secrets/templates/poddisruptionbudget.yaml index abe51d337..7b75ca3f4 100644 --- a/charts/external-secrets/external-secrets/templates/poddisruptionbudget.yaml +++ b/charts/external-secrets/external-secrets/templates/poddisruptionbudget.yaml @@ -3,7 +3,7 @@ apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: {{ include "external-secrets.fullname" . }}-pdb - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . | nindent 4 }} spec: @@ -16,4 +16,4 @@ spec: selector: matchLabels: {{- include "external-secrets.selectorLabels" . | nindent 6 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/external-secrets/external-secrets/templates/rbac.yaml b/charts/external-secrets/external-secrets/templates/rbac.yaml index da5d648ca..1bf5a2296 100644 --- a/charts/external-secrets/external-secrets/templates/rbac.yaml +++ b/charts/external-secrets/external-secrets/templates/rbac.yaml @@ -220,14 +220,14 @@ roleRef: name: {{ include "external-secrets.fullname" . }}-controller subjects: - name: {{ include "external-secrets.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} kind: ServiceAccount --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ include "external-secrets.fullname" . }}-leaderelection - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . 
| nindent 4 }} rules: @@ -261,7 +261,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ include "external-secrets.fullname" . }}-leaderelection - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . | nindent 4 }} roleRef: @@ -271,7 +271,7 @@ roleRef: subjects: - kind: ServiceAccount name: {{ include "external-secrets.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} {{- if .Values.rbac.servicebindings.create }} --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/charts/external-secrets/external-secrets/templates/service.yaml b/charts/external-secrets/external-secrets/templates/service.yaml index bf56fdea8..6e17b7e18 100644 --- a/charts/external-secrets/external-secrets/templates/service.yaml +++ b/charts/external-secrets/external-secrets/templates/service.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: {{ include "external-secrets.fullname" . }}-metrics - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . | nindent 4 }} {{- with .Values.metrics.service.annotations }} diff --git a/charts/external-secrets/external-secrets/templates/serviceaccount.yaml b/charts/external-secrets/external-secrets/templates/serviceaccount.yaml index fd61c7069..ceaa98e1c 100644 --- a/charts/external-secrets/external-secrets/templates/serviceaccount.yaml +++ b/charts/external-secrets/external-secrets/templates/serviceaccount.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "external-secrets.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . | nindent 4 }} {{- with .Values.serviceAccount.extraLabels }} diff --git a/charts/external-secrets/external-secrets/templates/servicemonitor.yaml b/charts/external-secrets/external-secrets/templates/servicemonitor.yaml index 63c9da02c..06e74d0f1 100644 --- a/charts/external-secrets/external-secrets/templates/servicemonitor.yaml +++ b/charts/external-secrets/external-secrets/templates/servicemonitor.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: {{ include "external-secrets.fullname" . }}-metrics - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets.labels" . | nindent 4 }} spec: @@ -24,14 +24,14 @@ metadata: {{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }} {{- end }} name: {{ include "external-secrets.fullname" . }}-metrics - namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace | quote }} + namespace: {{ .Values.serviceMonitor.namespace | default (include "external-secrets.namespace" .) | quote }} spec: selector: matchLabels: {{- include "external-secrets.selectorLabels" . | nindent 6 }} namespaceSelector: matchNames: - - {{ .Release.Namespace | quote }} + - {{ template "external-secrets.namespace" . }} endpoints: - port: metrics interval: {{ .Values.serviceMonitor.interval }} @@ -51,7 +51,7 @@ apiVersion: v1 kind: Service metadata: name: {{ include "external-secrets.fullname" . }}-webhook-metrics - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . 
}} labels: {{- include "external-secrets-webhook-metrics.labels" . | nindent 4 }} spec: @@ -72,14 +72,14 @@ metadata: {{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }} {{- end }} name: {{ include "external-secrets.fullname" . }}-webhook-metrics - namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace | quote }} + namespace: {{ .Values.serviceMonitor.namespace | default (include "external-secrets.namespace" .) | quote }} spec: selector: matchLabels: {{- include "external-secrets-webhook-metrics.labels" . | nindent 6 }} namespaceSelector: matchNames: - - {{ .Release.Namespace | quote }} + - {{ template "external-secrets.namespace" . }} endpoints: - port: metrics interval: {{ .Values.serviceMonitor.interval }} @@ -100,7 +100,7 @@ apiVersion: v1 kind: Service metadata: name: {{ include "external-secrets.fullname" . }}-cert-controller-metrics - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-cert-controller-metrics.labels" . | nindent 4 }} spec: @@ -121,14 +121,14 @@ metadata: {{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }} {{- end }} name: {{ include "external-secrets.fullname" . }}-cert-controller-metrics - namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace | quote }} + namespace: {{ .Values.serviceMonitor.namespace | default (include "external-secrets.namespace" .) | quote }} spec: selector: matchLabels: {{- include "external-secrets-cert-controller-metrics.labels" . | nindent 6 }} namespaceSelector: matchNames: - - {{ .Release.Namespace | quote }} + - {{ template "external-secrets.namespace" . }} endpoints: - port: metrics interval: {{ .Values.serviceMonitor.interval }} diff --git a/charts/external-secrets/external-secrets/templates/validatingwebhook.yaml b/charts/external-secrets/external-secrets/templates/validatingwebhook.yaml index a365b3666..63b39763f 100644 --- a/charts/external-secrets/external-secrets/templates/validatingwebhook.yaml +++ b/charts/external-secrets/external-secrets/templates/validatingwebhook.yaml @@ -10,7 +10,7 @@ metadata: {{- end }} {{- if and .Values.webhook.certManager.enabled .Values.webhook.certManager.addInjectorAnnotations }} annotations: - cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "external-secrets.fullname" . }}-webhook + cert-manager.io/inject-ca-from: {{ template "external-secrets.namespace" . }}/{{ include "external-secrets.fullname" . }}-webhook {{- end }} webhooks: - name: "validate.secretstore.external-secrets.io" @@ -22,7 +22,7 @@ webhooks: scope: "Namespaced" clientConfig: service: - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} name: {{ include "external-secrets.fullname" . }}-webhook path: /validate-external-secrets-io-v1beta1-secretstore admissionReviewVersions: ["v1", "v1beta1"] @@ -38,7 +38,7 @@ webhooks: scope: "Cluster" clientConfig: service: - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} name: {{ include "external-secrets.fullname" . }}-webhook path: /validate-external-secrets-io-v1beta1-clustersecretstore admissionReviewVersions: ["v1", "v1beta1"] @@ -56,7 +56,7 @@ metadata: {{- end }} {{- if and .Values.webhook.certManager.enabled .Values.webhook.certManager.addInjectorAnnotations }} annotations: - cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "external-secrets.fullname" . 
}}-webhook + cert-manager.io/inject-ca-from: {{ template "external-secrets.namespace" . }}/{{ include "external-secrets.fullname" . }}-webhook {{- end }} webhooks: - name: "validate.externalsecret.external-secrets.io" @@ -68,7 +68,7 @@ webhooks: scope: "Namespaced" clientConfig: service: - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} name: {{ include "external-secrets.fullname" . }}-webhook path: /validate-external-secrets-io-v1beta1-externalsecret admissionReviewVersions: ["v1", "v1beta1"] diff --git a/charts/external-secrets/external-secrets/templates/webhook-certificate.yaml b/charts/external-secrets/external-secrets/templates/webhook-certificate.yaml index d8aff1a6d..adb19fd95 100644 --- a/charts/external-secrets/external-secrets/templates/webhook-certificate.yaml +++ b/charts/external-secrets/external-secrets/templates/webhook-certificate.yaml @@ -4,7 +4,7 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: {{ include "external-secrets.fullname" . }}-webhook - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-webhook.labels" . | nindent 4 }} external-secrets.io/component: webhook @@ -16,8 +16,8 @@ spec: commonName: {{ include "external-secrets.fullname" . }}-webhook dnsNames: - {{ include "external-secrets.fullname" . }}-webhook - - {{ include "external-secrets.fullname" . }}-webhook.{{ .Release.Namespace }} - - {{ include "external-secrets.fullname" . }}-webhook.{{ .Release.Namespace }}.svc + - {{ include "external-secrets.fullname" . }}-webhook.{{ template "external-secrets.namespace" . }} + - {{ include "external-secrets.fullname" . }}-webhook.{{ template "external-secrets.namespace" . }}.svc issuerRef: {{- toYaml .Values.webhook.certManager.cert.issuerRef | nindent 4 }} {{- with .Values.webhook.certManager.cert.duration }} diff --git a/charts/external-secrets/external-secrets/templates/webhook-deployment.yaml b/charts/external-secrets/external-secrets/templates/webhook-deployment.yaml index f5d640d5b..2fe6f3f6e 100644 --- a/charts/external-secrets/external-secrets/templates/webhook-deployment.yaml +++ b/charts/external-secrets/external-secrets/templates/webhook-deployment.yaml @@ -3,7 +3,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "external-secrets.fullname" . }}-webhook - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-webhook.labels" . | nindent 4 }} {{- with .Values.webhook.deploymentAnnotations }} @@ -50,7 +50,7 @@ spec: args: - webhook - --port={{ .Values.webhook.port }} - - --dns-name={{ include "external-secrets.fullname" . }}-webhook.{{ .Release.Namespace }}.svc + - --dns-name={{ include "external-secrets.fullname" . }}-webhook.{{ template "external-secrets.namespace" . }}.svc - --cert-dir={{ .Values.webhook.certDir }} - --check-interval={{ .Values.webhook.certCheckInterval }} - --metrics-addr=:{{ .Values.webhook.metrics.listen.port }} @@ -100,19 +100,19 @@ spec: {{- if .Values.webhook.extraVolumes }} {{- toYaml .Values.webhook.extraVolumes | nindent 8 }} {{- end }} - {{- with .Values.webhook.nodeSelector }} + {{- with .Values.webhook.nodeSelector | default .Values.global.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.webhook.affinity }} + {{- with .Values.webhook.affinity | default .Values.global.affinity }} affinity: {{- toYaml . 
| nindent 8 }} {{- end }} - {{- with .Values.webhook.tolerations }} + {{- with .Values.webhook.tolerations | default .Values.global.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.webhook.topologySpreadConstraints }} + {{- with .Values.webhook.topologySpreadConstraints | default .Values.global.topologySpreadConstraints }} topologySpreadConstraints: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/charts/external-secrets/external-secrets/templates/webhook-poddisruptionbudget.yaml b/charts/external-secrets/external-secrets/templates/webhook-poddisruptionbudget.yaml index 665de97a5..58345ba68 100644 --- a/charts/external-secrets/external-secrets/templates/webhook-poddisruptionbudget.yaml +++ b/charts/external-secrets/external-secrets/templates/webhook-poddisruptionbudget.yaml @@ -3,7 +3,7 @@ apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: {{ include "external-secrets.fullname" . }}-webhook-pdb - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-webhook.labels" . | nindent 4 }} external-secrets.io/component: webhook @@ -17,4 +17,4 @@ spec: selector: matchLabels: {{- include "external-secrets-webhook.selectorLabels" . | nindent 6 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/external-secrets/external-secrets/templates/webhook-secret.yaml b/charts/external-secrets/external-secrets/templates/webhook-secret.yaml index 667a7b98b..fa7760ed6 100644 --- a/charts/external-secrets/external-secrets/templates/webhook-secret.yaml +++ b/charts/external-secrets/external-secrets/templates/webhook-secret.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Secret metadata: name: {{ include "external-secrets.fullname" . }}-webhook - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-webhook.labels" . | nindent 4 }} external-secrets.io/component: webhook diff --git a/charts/external-secrets/external-secrets/templates/webhook-service.yaml b/charts/external-secrets/external-secrets/templates/webhook-service.yaml index ec2001dbd..f2b4bbdcd 100644 --- a/charts/external-secrets/external-secrets/templates/webhook-service.yaml +++ b/charts/external-secrets/external-secrets/templates/webhook-service.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: {{ include "external-secrets.fullname" . }}-webhook - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-webhook.labels" . | nindent 4 }} external-secrets.io/component: webhook diff --git a/charts/external-secrets/external-secrets/templates/webhook-serviceaccount.yaml b/charts/external-secrets/external-secrets/templates/webhook-serviceaccount.yaml index 1c4a14b1b..193621842 100644 --- a/charts/external-secrets/external-secrets/templates/webhook-serviceaccount.yaml +++ b/charts/external-secrets/external-secrets/templates/webhook-serviceaccount.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "external-secrets-webhook.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ template "external-secrets.namespace" . }} labels: {{- include "external-secrets-webhook.labels" . 
| nindent 4 }} {{- with .Values.webhook.serviceAccount.extraLabels }} diff --git a/charts/external-secrets/external-secrets/tests/__snapshot__/cert_controller_test.yaml.snap b/charts/external-secrets/external-secrets/tests/__snapshot__/cert_controller_test.yaml.snap index 958e00a84..d24c81bcc 100644 --- a/charts/external-secrets/external-secrets/tests/__snapshot__/cert_controller_test.yaml.snap +++ b/charts/external-secrets/external-secrets/tests/__snapshot__/cert_controller_test.yaml.snap @@ -7,8 +7,8 @@ should match snapshot of default values: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: external-secrets-cert-controller - app.kubernetes.io/version: v0.9.13 - helm.sh/chart: external-secrets-0.9.13 + app.kubernetes.io/version: v0.9.14 + helm.sh/chart: external-secrets-0.9.14 name: RELEASE-NAME-external-secrets-cert-controller namespace: NAMESPACE spec: @@ -24,8 +24,8 @@ should match snapshot of default values: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: external-secrets-cert-controller - app.kubernetes.io/version: v0.9.13 - helm.sh/chart: external-secrets-0.9.13 + app.kubernetes.io/version: v0.9.14 + helm.sh/chart: external-secrets-0.9.14 spec: automountServiceAccountToken: true containers: @@ -38,7 +38,7 @@ should match snapshot of default values: - --secret-namespace=NAMESPACE - --metrics-addr=:8080 - --healthz-addr=:8081 - image: ghcr.io/external-secrets/external-secrets:v0.9.13 + image: ghcr.io/external-secrets/external-secrets:v0.9.14 imagePullPolicy: IfNotPresent name: cert-controller ports: diff --git a/charts/external-secrets/external-secrets/tests/__snapshot__/controller_test.yaml.snap b/charts/external-secrets/external-secrets/tests/__snapshot__/controller_test.yaml.snap index 8ba1ca00b..ba9078964 100644 --- a/charts/external-secrets/external-secrets/tests/__snapshot__/controller_test.yaml.snap +++ b/charts/external-secrets/external-secrets/tests/__snapshot__/controller_test.yaml.snap @@ -7,8 +7,8 @@ should match snapshot of default values: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: external-secrets - app.kubernetes.io/version: v0.9.13 - helm.sh/chart: external-secrets-0.9.13 + app.kubernetes.io/version: v0.9.14 + helm.sh/chart: external-secrets-0.9.14 name: RELEASE-NAME-external-secrets namespace: NAMESPACE spec: @@ -24,15 +24,15 @@ should match snapshot of default values: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: external-secrets - app.kubernetes.io/version: v0.9.13 - helm.sh/chart: external-secrets-0.9.13 + app.kubernetes.io/version: v0.9.14 + helm.sh/chart: external-secrets-0.9.14 spec: automountServiceAccountToken: true containers: - args: - --concurrent=1 - --metrics-addr=:8080 - image: ghcr.io/external-secrets/external-secrets:v0.9.13 + image: ghcr.io/external-secrets/external-secrets:v0.9.14 imagePullPolicy: IfNotPresent name: external-secrets ports: diff --git a/charts/external-secrets/external-secrets/tests/__snapshot__/crds_test.yaml.snap b/charts/external-secrets/external-secrets/tests/__snapshot__/crds_test.yaml.snap index 2dcd8dc13..e9178e685 100644 --- a/charts/external-secrets/external-secrets/tests/__snapshot__/crds_test.yaml.snap +++ b/charts/external-secrets/external-secrets/tests/__snapshot__/crds_test.yaml.snap @@ -910,6 +910,46 @@ should match snapshot of default values: - region - vault type: object + passworddepot: + description: 
Configures a store to sync secrets with a Password Depot instance. + properties: + auth: + description: Auth configures how secret-manager authenticates with a Password Depot instance. + properties: + secretRef: + properties: + credentials: + description: Username / Password is used for authentication. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + type: object + required: + - secretRef + type: object + database: + description: Database to use as source + type: string + host: + description: URL configures the Password Depot instance URL. + type: string + required: + - auth + - database + - host + type: object vault: description: Vault configures this store to sync secrets using Hashi provider properties: @@ -2174,6 +2214,11 @@ should match snapshot of default values: properties: account: type: string + hostId: + description: |- + Optional HostID for JWT authentication. This may be used depending + on how the Conjur JWT authenticator policy is configured. + type: string secretRef: description: |- Optional SecretRef that refers to a key in a Secret resource containing JWT token to @@ -2417,6 +2462,34 @@ should match snapshot of default values: required: - data type: object + fortanix: + description: Fortanix configures this store to sync secrets using the Fortanix provider + properties: + apiKey: + description: APIKey is the API token to access SDKMS Applications. + properties: + secretRef: + description: SecretRef is a reference to a secret containing the SDKMS API Key. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + type: object + apiUrl: + description: APIURL is the URL of SDKMS API. Defaults to `sdkms.fortanix.com`. + type: string + type: object gcpsm: description: GCPSM configures this store to sync secrets using Google Cloud Platform Secret Manager provider properties: @@ -2748,6 +2821,70 @@ should match snapshot of default values: required: - auth type: object + onboardbase: + description: Onboardbase configures this store to sync secrets using the Onboardbase provider + properties: + apiHost: + default: https://public.onboardbase.com/api/v1/ + description: APIHost use this to configure the host url for the API for selfhosted installation, default is https://public.onboardbase.com/api/v1/ + type: string + auth: + description: Auth configures how the Operator authenticates with the Onboardbase API + properties: + apiKeyRef: + description: |- + OnboardbaseAPIKey is the APIKey generated by an admin account. 
+ It is used to recognize and authorize access to a project and environment within onboardbase + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + passcodeRef: + description: OnboardbasePasscode is the passcode attached to the API Key + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + required: + - apiKeyRef + - passcodeRef + type: object + environment: + default: development + description: Environment is the name of an environmnent within a project to pull the secrets from + type: string + project: + default: development + description: Project is an onboardbase project that the secrets should be pulled from + type: string + required: + - apiHost + - auth + - environment + - project + type: object onepassword: description: OnePassword configures this store to sync secrets using the 1Password Cloud provider properties: @@ -2908,6 +3045,46 @@ should match snapshot of default values: - region - vault type: object + passworddepot: + description: Configures a store to sync secrets with a Password Depot instance. + properties: + auth: + description: Auth configures how secret-manager authenticates with a Password Depot instance. + properties: + secretRef: + properties: + credentials: + description: Username / Password is used for authentication. + properties: + key: + description: |- + The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + defaulted, in others it may be required. + type: string + name: + description: The name of the Secret resource being referred to. + type: string + namespace: + description: |- + Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + to the namespace of the referent. + type: string + type: object + type: object + required: + - secretRef + type: object + database: + description: Database to use as source + type: string + host: + description: URL configures the Password Depot instance URL. + type: string + required: + - auth + - database + - host + type: object pulumi: description: Pulumi configures this store to sync secrets using the Pulumi provider properties: @@ -3480,6 +3657,14 @@ should match snapshot of default values: - path - username type: object + namespace: + description: |- + Name of the vault namespace to authenticate to. This can be different than the namespace your secret is in. + Namespaces is a set of features within Vault Enterprise that allows + Vault environments to support Secure Multi-tenancy. e.g: "ns1". 
+ More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces + This will default to Vault.Namespace field if set, or empty otherwise + type: string tokenSecretRef: description: TokenSecretRef authenticates with Vault by presenting a token. properties: diff --git a/charts/external-secrets/external-secrets/tests/__snapshot__/webhook_test.yaml.snap b/charts/external-secrets/external-secrets/tests/__snapshot__/webhook_test.yaml.snap index 7ee9b01c5..bf1793037 100644 --- a/charts/external-secrets/external-secrets/tests/__snapshot__/webhook_test.yaml.snap +++ b/charts/external-secrets/external-secrets/tests/__snapshot__/webhook_test.yaml.snap @@ -7,8 +7,8 @@ should match snapshot of default values: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: external-secrets-webhook - app.kubernetes.io/version: v0.9.13 - helm.sh/chart: external-secrets-0.9.13 + app.kubernetes.io/version: v0.9.14 + helm.sh/chart: external-secrets-0.9.14 name: RELEASE-NAME-external-secrets-webhook namespace: NAMESPACE spec: @@ -24,8 +24,8 @@ should match snapshot of default values: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: external-secrets-webhook - app.kubernetes.io/version: v0.9.13 - helm.sh/chart: external-secrets-0.9.13 + app.kubernetes.io/version: v0.9.14 + helm.sh/chart: external-secrets-0.9.14 spec: automountServiceAccountToken: true containers: @@ -37,7 +37,7 @@ should match snapshot of default values: - --check-interval=5m - --metrics-addr=:8080 - --healthz-addr=:8081 - image: ghcr.io/external-secrets/external-secrets:v0.9.13 + image: ghcr.io/external-secrets/external-secrets:v0.9.14 imagePullPolicy: IfNotPresent name: webhook ports: @@ -81,8 +81,8 @@ should match snapshot of default values: app.kubernetes.io/instance: RELEASE-NAME app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: external-secrets-webhook - app.kubernetes.io/version: v0.9.13 + app.kubernetes.io/version: v0.9.14 external-secrets.io/component: webhook - helm.sh/chart: external-secrets-0.9.13 + helm.sh/chart: external-secrets-0.9.14 name: RELEASE-NAME-external-secrets-webhook namespace: NAMESPACE diff --git a/charts/external-secrets/external-secrets/values.yaml b/charts/external-secrets/external-secrets/values.yaml index f2f5597c1..ae24830c7 100644 --- a/charts/external-secrets/external-secrets/values.yaml +++ b/charts/external-secrets/external-secrets/values.yaml @@ -1,3 +1,9 @@ +global: + nodeSelector: {} + tolerations: [] + topologySpreadConstraints: [] + affinity: {} + replicaCount: 1 # -- Specifies the amount of historic ReplicaSets k8s should keep (see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#clean-up-policy) @@ -31,6 +37,7 @@ crds: imagePullSecrets: [] nameOverride: "" fullnameOverride: "" +namespaceOverride: "" # -- Additional labels added to all helm chart resources. 
commonLabels: {} diff --git a/charts/f5/nginx-ingress/Chart.yaml b/charts/f5/nginx-ingress/Chart.yaml index 933831641..62891f17b 100644 --- a/charts/f5/nginx-ingress/Chart.yaml +++ b/charts/f5/nginx-ingress/Chart.yaml @@ -1,17 +1,17 @@ annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: NGINX Ingress Controller - catalog.cattle.io/kube-version: '>= 1.22.0-0' + catalog.cattle.io/kube-version: '>= 1.23.0-0' catalog.cattle.io/release-name: nginx-ingress apiVersion: v2 -appVersion: 3.4.3 +appVersion: 3.5.0 description: NGINX Ingress Controller home: https://github.com/nginxinc/kubernetes-ingress icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.4.3/charts/nginx-ingress/chart-icon.png keywords: - ingress - nginx -kubeVersion: '>= 1.22.0-0' +kubeVersion: '>= 1.23.0-0' maintainers: - email: kubernetes@nginx.com name: nginxinc @@ -19,4 +19,4 @@ name: nginx-ingress sources: - https://github.com/nginxinc/kubernetes-ingress/tree/v3.4.3/charts/nginx-ingress type: application -version: 1.1.3 +version: 1.2.0 diff --git a/charts/f5/nginx-ingress/README.md b/charts/f5/nginx-ingress/README.md index 053585ad8..c8ce00f3a 100644 --- a/charts/f5/nginx-ingress/README.md +++ b/charts/f5/nginx-ingress/README.md @@ -2,7 +2,7 @@ ## Introduction -This chart deploys the NGINX Ingress Controller in your Kubernetes cluster. +This chart deploys NGINX Ingress Controller in your Kubernetes cluster. ## Prerequisites @@ -363,8 +363,8 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |`controller.config.annotations` | The annotations of the Ingress Controller configmap. | {} | |`controller.config.entries` | The entries of the ConfigMap for customizing NGINX configuration. See [ConfigMap resource docs](https://docs.nginx.com/nginx-ingress-controller/configuration/global-configuration/configmap-resource/) for the list of supported ConfigMap keys. | {} | |`controller.customPorts` | A list of custom ports to expose on the NGINX Ingress Controller pod. Follows the conventional Kubernetes yaml syntax for container ports. | [] | -|`controller.defaultTLS.cert` | The base64-encoded TLS certificate for the default HTTPS server. **Note:** It is recommended that you specify your own certificate. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | -|`controller.defaultTLS.key` | The base64-encoded TLS key for the default HTTPS server. **Note:** It is recommended that you specify your own key. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | +|`controller.defaultTLS.cert` | The base64-encoded TLS certificate for the default HTTPS server. **Note:** It is recommended that you specify your own certificate. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | "" | +|`controller.defaultTLS.key` | The base64-encoded TLS key for the default HTTPS server. **Note:** It is recommended that you specify your own key. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | "" | |`controller.defaultTLS.secret` | The secret with a TLS certificate and key for the default HTTPS server. The value must follow the following format: `/`. 
Used as an alternative to specifying a certificate and key using `controller.defaultTLS.cert` and `controller.defaultTLS.key` parameters. **Note:** Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server. | None | |`controller.wildcardTLS.cert` | The base64-encoded TLS certificate for every Ingress/VirtualServer host that has TLS enabled but no secret specified. If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. | None | |`controller.wildcardTLS.key` | The base64-encoded TLS key for every Ingress/VirtualServer host that has TLS enabled but no secret specified. If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. | None | @@ -379,12 +379,15 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |`controller.volumeMounts` | The volumeMounts of the Ingress Controller pods. | [] | |`controller.initContainers` | InitContainers for the Ingress Controller pods. | [] | |`controller.extraContainers` | Extra (eg. sidecar) containers for the Ingress Controller pods. | [] | +|`controller.podSecurityContext`| The SecurityContext for Ingress Controller pods. | "seccompProfile": {"type": "RuntimeDefault"} | +|`controller.securityContext`| The SecurityContext for Ingress Controller container. | {} | +|`controller.initContainerSecurityContext`| The SecurityContext for Ingress Controller init container when `readOnlyRootFilesystem` is enabled by either setting `controller.securityContext.readOnlyRootFilesystem` or `controller.readOnlyRootFilesystem`to `true`. | {} | |`controller.resources` | The resources of the Ingress Controller pods. | requests: cpu=100m,memory=128Mi | -|`controller.initContainerResources` | The resources of the init container which is used when `controller.readOnlyRootFilesystem` is set to `true` | requests: cpu=100m,memory=128Mi | +|`controller.initContainerResources` | The resources of the init container which is used when `readOnlyRootFilesystem` is enabled by either setting `controller.securityContext.readOnlyRootFilesystem` or `controller.readOnlyRootFilesystem`to `true`. | requests: cpu=100m,memory=128Mi | |`controller.replicaCount` | The number of replicas of the Ingress Controller deployment. | 1 | |`controller.ingressClass.name` | A class of the Ingress Controller. An IngressClass resource with the name equal to the class must be deployed. Otherwise, the Ingress Controller will fail to start. The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class. The Ingress Controller processes all the VirtualServer/VirtualServerRoute/TransportServer resources that do not have the "ingressClassName" field for all versions of Kubernetes. | nginx | -|`controller.ingressClass.create` | Creates a new IngressClass object with the name `controller.ingressClass.name`. Set to `false` to use an existing ingressClass created using `kubectl` with the same name. If you use `helm upgrade`, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.3.0, do not set the value to false. | true | -|`controller.ingressClass.setAsDefaultIngress` | New Ingresses without an `"ingressClassName"` field specified will be assigned the class specified in `controller.ingressClass.name`. 
Requires `controller.ingressClass.create`. | false | +|`controller.ingressClass.create` | Creates a new IngressClass object with the name `controller.ingressClass.name`. Set to `false` to use an existing ingressClass created using `kubectl` with the same name. If you use `helm upgrade`, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.4.3, do not set the value to false. | true | +|`controller.ingressClass.setAsDefaultIngress` | New Ingresses without an `"ingressClassName"` field specified will be assigned the class specified in `controller.ingressClass.name`. Requires `controller.ingressClass.create`. | false | |`controller.watchNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchNamespace="default\,nginx-ingress"`. | "" | |`controller.watchNamespaceLabel` | Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespace`. | "" | |`controller.watchSecretNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources of type Secret. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See `controller.watchNamespace` and `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchSecretNamespace="default\,nginx-ingress"`. | "" | @@ -463,10 +466,12 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |`controller.podDisruptionBudget.maxUnavailable` | The number of Ingress Controller pods that can be unavailable. This is a mutually exclusive setting with "minAvailable". | 0 | |`controller.strategy` | Specifies the strategy used to replace old Pods with new ones. Docs for [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) and [Daemonset update strategy](https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy) | {} | |`controller.disableIPV6` | Disable IPV6 listeners explicitly for nodes that do not support the IPV6 stack. | false | -|`controller.defaultHTTPListenerPort` | Sets the port for the HTTP `default_server` listener. | 80 | -|`controller.defaultHTTPSListenerPort` | Sets the port for the HTTPS `default_server` listener. | 443 | -|`controller.readOnlyRootFilesystem` | Configure root filesystem as read-only and add volumes for temporary data. | false | +|`controller.defaultHTTPListenerPort` | Sets the port for the HTTP `default_server` listener. | 80 | +|`controller.defaultHTTPSListenerPort` | Sets the port for the HTTPS `default_server` listener. | 443 | +|`controller.readOnlyRootFilesystem` | Configure root filesystem as read-only and add volumes for temporary data. Three major releases after 3.5.x this argument will be moved permanently to the `controller.securityContext` section. 
| false | |`controller.enableSSLDynamicReload` | Enable lazy loading for SSL Certificates. | true | +|`controller.telemetryReporting.enable` | Enable telemetry reporting. | true | +|`controller.enableDynamicWeightChangesReload` | Enable weight changes without reloading the NGINX configuration. May require increasing map_hash_bucket_size, map_hash_max_size, variable_hash_bucket_size, and variable_hash_max_size in the [ConfigMap](https://docs.nginx.com/nginx-ingress-controller/configuration/global-configuration/configmap-resource/) if there are many two-way splits. Requires `controller.nginxplus` | false | |`rbac.create` | Configures RBAC. | true | |`prometheus.create` | Expose NGINX or NGINX Plus metrics in the Prometheus format. | true | |`prometheus.port` | Configures the port to scrape the metrics. | 9113 | @@ -485,6 +490,21 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |`serviceNameOverride` | Used to prevent cloud load balancers from being replaced due to service name change during helm upgrades. | "" | |`nginxServiceMesh.enable` | Enable integration with NGINX Service Mesh. See the NGINX Service Mesh [docs](https://docs.nginx.com/nginx-service-mesh/tutorials/kic/deploy-with-kic/) for more details. Requires `controller.nginxplus`. | false | |`nginxServiceMesh.enableEgress` | Enable NGINX Service Mesh workloads to route egress traffic through the Ingress Controller. See the NGINX Service Mesh [docs](https://docs.nginx.com/nginx-service-mesh/tutorials/kic/deploy-with-kic/#enabling-egress) for more details. Requires `nginxServiceMesh.enable`. | false | +|`nginxAgent.enable` | Enable NGINX Agent to integrate Security Monitoring and App Protect WAF modules. Requires `controller.appprotect.enable`. | false | +|`nginxAgent.instanceGroup` | Set a custom Instance Group name, shown when connected to NGINX Instance Manager. `nginx-ingress.controller.fullname` will be used if not set. | "" | +|`nginxAgent.logLevel` | Log level for NGINX Agent. | "error | +|`nginxAgent.instanceManager.host` | FQDN or IP for connecting to NGINX Ingress Controller. Required when `nginxAgent.enable` is set to `true` | "" | +|`nginxAgent.instanceManager.grpcPort` | Port for connecting to NGINX Ingress Controller. | 443 | +|`nginxAgent.instanceManager.sni` | Server Name Indication for NGINX Instance Manager. See the NGINX Agent [docs](https://docs.nginx.com/nginx-agent/configuration/encrypt-communication/) for more details. | "" | +|`nginxAgent.instanceManager.tls.enable` | Enable TLS for NGINX Instance Manager connection. | true | +|`nginxAgent.instanceManager.tls.skipVerify` | Skip certification verification for NGINX Instance Manager connection. | false | +|`nginxAgent.instanceManager.tls.caSecret` | Name of `nginx.org/ca` secret used for verification of NGINX Instance Manager TLS. | "" | +|`nginxAgent.instanceManager.tls.secret` | Name of `kubernetes.io/tls` secret with a TLS certificate and key for using mTLS between NGINX Agent and NGINX Instance Manager. See the NGINX Instance Manager [docs](https://docs.nginx.com/nginx-management-suite/admin-guides/configuration/secure-traffic/#mutual-client-certificate-auth-setup-mtls) and the NGINX Agent [docs](https://docs.nginx.com/nginx-agent/configuration/encrypt-communication/) for more details. | "" | +|`nginxAgent.syslog.host` | Address for NGINX Agent to run syslog listener. | 127.0.0.1 | +|`nginxAgent.syslog.port` | Port for NGINX Agent to run syslog listener. 
| 1514 | +|`nginxAgent.napMonitoring.collectorBufferSize` | Buffer size for collector. Will contain log lines and parsed log lines. | 50000 | +|`nginxAgent.napMonitoring.processorBufferSize` | Buffer size for processor. Will contain log lines and parsed log lines. | 50000 | +|`nginxAgent.customConfigMap` | The name of a custom ConfigMap to use instead of the one provided by default. | "" | ## Notes diff --git a/charts/f5/nginx-ingress/crds/k8s.nginx.org_policies.yaml b/charts/f5/nginx-ingress/crds/k8s.nginx.org_policies.yaml index 195406300..e31e43c67 100644 --- a/charts/f5/nginx-ingress/crds/k8s.nginx.org_policies.yaml +++ b/charts/f5/nginx-ingress/crds/k8s.nginx.org_policies.yaml @@ -187,6 +187,8 @@ spec: securityLog: description: SecurityLog defines the security log of a WAF policy. properties: + apLogBundle: + type: string apLogConf: type: string enable: @@ -198,6 +200,8 @@ spec: items: description: SecurityLog defines the security log of a WAF policy. properties: + apLogBundle: + type: string apLogConf: type: string enable: diff --git a/charts/f5/nginx-ingress/templates/_helpers.tpl b/charts/f5/nginx-ingress/templates/_helpers.tpl index 2f5add833..497e1f6cd 100644 --- a/charts/f5/nginx-ingress/templates/_helpers.tpl +++ b/charts/f5/nginx-ingress/templates/_helpers.tpl @@ -60,6 +60,24 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} +{{/* +Pod labels +*/}} +{{- define "nginx-ingress.podLabels" -}} +{{- include "nginx-ingress.selectorLabels" . }} +{{- if .Values.nginxServiceMesh.enable }} +nsm.nginx.com/enable-ingress: "true" +nsm.nginx.com/enable-egress: "{{ .Values.nginxServiceMesh.enableEgress }}" +nsm.nginx.com/{{ .Values.controller.kind }}: {{ include "nginx-ingress.controller.fullname" . }} +{{- end }} +{{- if and .Values.nginxAgent.enable (eq (.Values.nginxAgent.customConfigMap | default "") "") }} +agent-configuration-revision-hash: {{ include "nginx-ingress.agentConfiguration" . | sha1sum | trunc 8 | quote }} +{{- end }} +{{- if .Values.controller.pod.extraLabels }} +{{ toYaml .Values.controller.pod.extraLabels }} +{{- end }} +{{- end }} + {{/* Selector labels */}} @@ -83,6 +101,17 @@ Expand the name of the configmap. {{- end -}} {{- end -}} +{{/* +Expand the name of the configmap used for NGINX Agent. +*/}} +{{- define "nginx-ingress.agentConfigName" -}} +{{- if ne (.Values.nginxAgent.customConfigMap | default "") "" -}} +{{ .Values.nginxAgent.customConfigMap }} +{{- else -}} +{{- printf "%s-agent-config" (include "nginx-ingress.fullname" . | trunc 49 | trimSuffix "-") -}} +{{- end -}} +{{- end -}} + {{/* Expand leader election lock name. */}} @@ -134,10 +163,35 @@ Expand image name. {{- printf "%s-%s" (include "nginx-ingress.fullname" .) "prometheus-service" -}} {{- end -}} +{{/* +return if readOnlyRootFilesystem is enabled or not. +*/}} +{{- define "nginx-ingress.readOnlyRootFilesystem" -}} +{{- if or .Values.controller.readOnlyRootFilesystem (and .Values.controller.securityContext .Values.controller.securityContext.readOnlyRootFilesystem) -}} +true +{{- else -}} +false +{{- end -}} +{{- end -}} + {{/* Build the args for the service binary. 
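To make the `nginxAgent.*` parameters documented above concrete, here is a minimal values sketch for turning the agent integration on. The keys mirror the chart's new `nginxAgent` section; the Instance Manager host is a hypothetical placeholder, and the table above notes that the integration also expects `controller.appprotect.enable`.

```yaml
controller:
  appprotect:
    enable: true                 # the agent integration is documented as requiring App Protect
nginxAgent:
  enable: true
  logLevel: "error"
  instanceManager:
    host: nim.example.internal   # hypothetical NGINX Instance Manager endpoint; required when enable is true
    grpcPort: 443
  syslog:
    host: "127.0.0.1"
    port: 1514
```

With `nginxAgent.customConfigMap` left at its default empty string, the chart renders its own `*-agent-config` ConfigMap from these values, as the templates that follow show.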
*/}} {{- define "nginx-ingress.args" -}} +{{- if and .Values.controller.debug .Values.controller.debug.enable }} +- --listen=:2345 +- --headless=true +- --log=true +- --log-output=debugger,debuglineerr,gdbwire,lldbout,rpc,dap,fncall,minidump,stack +- --accept-multiclient +- --api-version=2 +- exec +- ./nginx-ingress +{{- if .Values.controller.debug.continue }} +- --continue +{{- end }} +- -- +{{- end -}} - -nginx-plus={{ .Values.controller.nginxplus }} - -nginx-reload-timeout={{ .Values.controller.nginxReloadTimeout }} - -enable-app-protect={{ .Values.controller.appprotect.enable }} @@ -223,4 +277,145 @@ Build the args for the service binary. - -ready-status-port={{ .Values.controller.readyStatus.port }} - -enable-latency-metrics={{ .Values.controller.enableLatencyMetrics }} - -ssl-dynamic-reload={{ .Values.controller.enableSSLDynamicReload }} +- -enable-telemetry-reporting={{ .Values.controller.telemetryReporting.enable}} +- -weight-changes-dynamic-reload={{ .Values.controller.enableWeightChangesDynamicReload}} +{{- if .Values.nginxAgent.enable }} +- -agent=true +- -agent-instance-group={{ default (include "nginx-ingress.controller.fullname" .) .Values.nginxAgent.instanceGroup }} +{{- end }} {{- end -}} + +{{/* +Volumes for controller. +*/}} +{{- define "nginx-ingress.volumes" -}} +{{- $volumesSet := "false" }} +volumes: +{{- if eq (include "nginx-ingress.volumeEntries" .) "" -}} +{{ toYaml list | printf " %s" }} +{{- else }} +{{ include "nginx-ingress.volumeEntries" . }} +{{- end -}} +{{- end -}} + +{{/* +List of volumes for controller. +*/}} +{{- define "nginx-ingress.volumeEntries" -}} +{{- if eq (include "nginx-ingress.readOnlyRootFilesystem" .) "true" }} +- name: nginx-etc + emptyDir: {} +- name: nginx-cache + emptyDir: {} +- name: nginx-lib + emptyDir: {} +- name: nginx-log + emptyDir: {} +{{- end }} +{{- if .Values.controller.volumes }} +{{ toYaml .Values.controller.volumes }} +{{- end }} +{{- if .Values.nginxAgent.enable }} +- name: agent-conf + configMap: + name: {{ include "nginx-ingress.agentConfigName" . }} +- name: agent-dynamic + emptyDir: {} +{{- if and .Values.nginxAgent.instanceManager.tls (or (ne (.Values.nginxAgent.instanceManager.tls.secret | default "") "") (ne (.Values.nginxAgent.instanceManager.tls.caSecret | default "") "")) }} +- name: nginx-agent-tls + projected: + sources: +{{- if ne .Values.nginxAgent.instanceManager.tls.secret "" }} + - secret: + name: {{ .Values.nginxAgent.instanceManager.tls.secret }} +{{- end }} +{{- if ne .Values.nginxAgent.instanceManager.tls.caSecret "" }} + - secret: + name: {{ .Values.nginxAgent.instanceManager.tls.caSecret }} +{{- end }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Volume mounts for controller. +*/}} +{{- define "nginx-ingress.volumeMounts" -}} +{{- $volumesSet := "false" }} +volumeMounts: +{{- if eq (include "nginx-ingress.volumeMountEntries" .) "" -}} +{{ toYaml list | printf " %s" }} +{{- else }} +{{ include "nginx-ingress.volumeMountEntries" . }} +{{- end -}} +{{- end -}} + +{{- define "nginx-ingress.volumeMountEntries" -}} +{{- if eq (include "nginx-ingress.readOnlyRootFilesystem" .) 
"true" }} +- mountPath: /etc/nginx + name: nginx-etc +- mountPath: /var/cache/nginx + name: nginx-cache +- mountPath: /var/lib/nginx + name: nginx-lib +- mountPath: /var/log/nginx + name: nginx-log +{{- end }} +{{- if .Values.controller.volumeMounts }} +{{ toYaml .Values.controller.volumeMounts }} +{{- end }} +{{- if .Values.nginxAgent.enable }} +- name: agent-conf + mountPath: /etc/nginx-agent/nginx-agent.conf + subPath: nginx-agent.conf +- name: agent-dynamic + mountPath: /var/lib/nginx-agent +{{- if and .Values.nginxAgent.instanceManager.tls (or (ne (.Values.nginxAgent.instanceManager.tls.secret | default "") "") (ne (.Values.nginxAgent.instanceManager.tls.caSecret | default "") "")) }} +- name: nginx-agent-tls + mountPath: /etc/ssl/nms + readOnly: true +{{- end }} +{{- end -}} +{{- end -}} + +{{- define "nginx-ingress.agentConfiguration" -}} +log: + level: {{ .Values.nginxAgent.logLevel }} + path: "" +server: + host: {{ required ".Values.nginxAgent.instanceManager.host is required when setting .Values.nginxAgent.enable to true" .Values.nginxAgent.instanceManager.host }} + grpcPort: {{ .Values.nginxAgent.instanceManager.grpcPort }} +{{- if ne (.Values.nginxAgent.instanceManager.sni | default "") "" }} + metrics: {{ .Values.nginxAgent.instanceManager.sni }} + command: {{ .Values.nginxAgent.instanceManager.sni }} +{{- end }} +{{- if .Values.nginxAgent.instanceManager.tls }} +tls: + enable: {{ .Values.nginxAgent.instanceManager.tls.enable | default true }} + skip_verify: {{ .Values.nginxAgent.instanceManager.tls.skipVerify | default false }} + {{- if ne .Values.nginxAgent.instanceManager.tls.caSecret "" }} + ca: "/etc/ssl/nms/ca.crt" + {{- end }} + {{- if ne .Values.nginxAgent.instanceManager.tls.secret "" }} + cert: "/etc/ssl/nms/tls.crt" + key: "/etc/ssl/nms/tls.key" + {{- end }} +{{- end }} +features: + - registration + - nginx-counting + - metrics-sender + - dataplane-status +extensions: + - nginx-app-protect + - nap-monitoring +nginx_app_protect: + report_interval: 15s + precompiled_publication: true +nap_monitoring: + collector_buffer_size: {{ .Values.nginxAgent.napMonitoring.collectorBufferSize }} + processor_buffer_size: {{ .Values.nginxAgent.napMonitoring.processorBufferSize }} + syslog_ip: {{ .Values.nginxAgent.syslog.host }} + syslog_port: {{ .Values.nginxAgent.syslog.port }} + +{{ end -}} diff --git a/charts/f5/nginx-ingress/templates/clusterrole.yaml b/charts/f5/nginx-ingress/templates/clusterrole.yaml index 559006ff6..a231ca820 100644 --- a/charts/f5/nginx-ingress/templates/clusterrole.yaml +++ b/charts/f5/nginx-ingress/templates/clusterrole.yaml @@ -49,6 +49,19 @@ rules: - get - list - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - list +- apiGroups: + - "apps" + resources: + - replicasets + - daemonsets + verbs: + - get - apiGroups: - networking.k8s.io resources: diff --git a/charts/f5/nginx-ingress/templates/controller-configmap.yaml b/charts/f5/nginx-ingress/templates/controller-configmap.yaml index fd1199186..8f1d3e47b 100644 --- a/charts/f5/nginx-ingress/templates/controller-configmap.yaml +++ b/charts/f5/nginx-ingress/templates/controller-configmap.yaml @@ -11,7 +11,22 @@ metadata: {{ toYaml .Values.controller.config.annotations | indent 4 }} {{- end }} data: -{{- if .Values.controller.config.entries }} -{{ toYaml .Values.controller.config.entries | indent 2 }} +{{ toYaml (default dict .Values.controller.config.entries) | indent 2 }} {{- end }} +--- +{{- if and .Values.nginxAgent.enable (eq (.Values.nginxAgent.customConfigMap | default "") "") }} 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "nginx-ingress.agentConfigName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} +{{- if .Values.controller.config.annotations }} + annotations: +{{ toYaml .Values.controller.config.annotations | indent 4 }} +{{- end }} +data: + nginx-agent.conf: |- +{{ include "nginx-ingress.agentConfiguration" . | indent 4 }} {{- end }} diff --git a/charts/f5/nginx-ingress/templates/controller-daemonset.yaml b/charts/f5/nginx-ingress/templates/controller-daemonset.yaml index b2459c927..8da65c468 100644 --- a/charts/f5/nginx-ingress/templates/controller-daemonset.yaml +++ b/charts/f5/nginx-ingress/templates/controller-daemonset.yaml @@ -16,15 +16,7 @@ spec: template: metadata: labels: - {{- include "nginx-ingress.selectorLabels" . | nindent 8 }} -{{- if .Values.nginxServiceMesh.enable }} - nsm.nginx.com/enable-ingress: "true" - nsm.nginx.com/enable-egress: "{{ .Values.nginxServiceMesh.enableEgress }}" - nsm.nginx.com/daemonset: {{ include "nginx-ingress.controller.fullname" . }} -{{- end }} -{{- if .Values.controller.pod.extraLabels }} -{{ toYaml .Values.controller.pod.extraLabels | indent 8 }} -{{- end }} + {{- include "nginx-ingress.podLabels" . | nindent 8 }} {{- if or .Values.prometheus.create .Values.controller.pod.annotations }} annotations: {{- if .Values.prometheus.create }} @@ -40,8 +32,7 @@ spec: serviceAccountName: {{ include "nginx-ingress.serviceAccountName" . }} automountServiceAccountToken: true securityContext: - seccompProfile: - type: RuntimeDefault +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} {{- if .Values.controller.nodeSelector }} nodeSelector: @@ -55,22 +46,7 @@ spec: affinity: {{ toYaml .Values.controller.affinity | indent 8 }} {{- end }} -{{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.volumes }} - volumes: -{{- end }} -{{- if .Values.controller.readOnlyRootFilesystem }} - - name: nginx-etc - emptyDir: {} - - name: nginx-cache - emptyDir: {} - - name: nginx-lib - emptyDir: {} - - name: nginx-log - emptyDir: {} -{{- end }} -{{- if .Values.controller.volumes }} -{{ toYaml .Values.controller.volumes | indent 6 }} -{{- end }} +{{- include "nginx-ingress.volumes" . | indent 6 }} {{- if .Values.controller.priorityClassName }} priorityClassName: {{ .Values.controller.priorityClassName }} {{- end }} @@ -117,6 +93,10 @@ spec: periodSeconds: 1 initialDelaySeconds: {{ .Values.controller.readyStatus.initialDelaySeconds }} {{- end }} +{{- if .Values.controller.securityContext }} + securityContext: +{{ toYaml .Values.controller.securityContext | indent 10 }} +{{- else }} securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: {{ .Values.controller.readOnlyRootFilesystem }} @@ -127,22 +107,8 @@ spec: - ALL add: - NET_BIND_SERVICE -{{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.volumeMounts }} - volumeMounts: -{{- end }} -{{- if .Values.controller.readOnlyRootFilesystem }} - - mountPath: /etc/nginx - name: nginx-etc - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /var/lib/nginx - name: nginx-lib - - mountPath: /var/log/nginx - name: nginx-log -{{- end }} -{{- if .Values.controller.volumeMounts }} -{{ toYaml .Values.controller.volumeMounts | indent 8 }} {{- end }} +{{- include "nginx-ingress.volumeMounts" . 
| indent 8 }} env: - name: POD_NAMESPACE valueFrom: @@ -168,10 +134,10 @@ spec: {{- if .Values.controller.extraContainers }} {{ toYaml .Values.controller.extraContainers | nindent 6 }} {{- end }} -{{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.initContainers }} +{{- if or (eq (include "nginx-ingress.readOnlyRootFilesystem" .) "true" ) .Values.controller.initContainers }} initContainers: {{- end }} -{{- if .Values.controller.readOnlyRootFilesystem }} +{{- if eq (include "nginx-ingress.readOnlyRootFilesystem" .) "true" }} - name: init-{{ include "nginx-ingress.name" . }} image: {{ include "nginx-ingress.image" . }} imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" @@ -180,6 +146,10 @@ spec: resources: {{ toYaml .Values.controller.initContainerResources | indent 10 }} {{- end }} +{{- if .Values.controller.initContainerSecurityContext }} + securityContext: +{{ toYaml .Values.controller.initContainerSecurityContext | indent 10 }} +{{- else }} securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -188,6 +158,7 @@ spec: capabilities: drop: - ALL +{{- end }} volumeMounts: - mountPath: /mnt/etc name: nginx-etc diff --git a/charts/f5/nginx-ingress/templates/controller-deployment.yaml b/charts/f5/nginx-ingress/templates/controller-deployment.yaml index 1f291ff4c..c8bc8f833 100644 --- a/charts/f5/nginx-ingress/templates/controller-deployment.yaml +++ b/charts/f5/nginx-ingress/templates/controller-deployment.yaml @@ -19,15 +19,7 @@ spec: template: metadata: labels: - {{- include "nginx-ingress.selectorLabels" . | nindent 8 }} -{{- if .Values.nginxServiceMesh.enable }} - nsm.nginx.com/enable-ingress: "true" - nsm.nginx.com/enable-egress: "{{ .Values.nginxServiceMesh.enableEgress }}" - nsm.nginx.com/deployment: {{ include "nginx-ingress.controller.fullname" . }} -{{- end }} -{{- if .Values.controller.pod.extraLabels }} -{{ toYaml .Values.controller.pod.extraLabels | indent 8 }} -{{- end }} + {{- include "nginx-ingress.podLabels" . | nindent 8 }} {{- if or .Values.prometheus.create .Values.controller.pod.annotations }} annotations: {{- if .Values.prometheus.create }} @@ -56,30 +48,14 @@ spec: topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | indent 8 }} {{- end }} -{{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.volumes }} - volumes: -{{- end }} -{{- if .Values.controller.readOnlyRootFilesystem }} - - name: nginx-etc - emptyDir: {} - - name: nginx-cache - emptyDir: {} - - name: nginx-lib - emptyDir: {} - - name: nginx-log - emptyDir: {} -{{- end }} -{{- if .Values.controller.volumes }} -{{ toYaml .Values.controller.volumes | indent 6 }} -{{- end }} +{{- include "nginx-ingress.volumes" . | indent 6 }} {{- if .Values.controller.priorityClassName }} priorityClassName: {{ .Values.controller.priorityClassName }} {{- end }} serviceAccountName: {{ include "nginx-ingress.serviceAccountName" . 
}} automountServiceAccountToken: true securityContext: - seccompProfile: - type: RuntimeDefault +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} hostNetwork: {{ .Values.controller.hostNetwork }} dnsPolicy: {{ .Values.controller.dnsPolicy }} @@ -126,6 +102,10 @@ spec: {{- end }} resources: {{ toYaml .Values.controller.resources | indent 10 }} +{{- if .Values.controller.securityContext }} + securityContext: +{{ toYaml .Values.controller.securityContext | indent 10 }} +{{- else }} securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: {{ .Values.controller.readOnlyRootFilesystem }} @@ -136,22 +116,8 @@ spec: - ALL add: - NET_BIND_SERVICE -{{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.volumeMounts }} - volumeMounts: -{{- end }} -{{- if .Values.controller.readOnlyRootFilesystem }} - - mountPath: /etc/nginx - name: nginx-etc - - mountPath: /var/cache/nginx - name: nginx-cache - - mountPath: /var/lib/nginx - name: nginx-lib - - mountPath: /var/log/nginx - name: nginx-log -{{- end }} -{{- if .Values.controller.volumeMounts}} -{{ toYaml .Values.controller.volumeMounts | indent 8 }} {{- end }} +{{- include "nginx-ingress.volumeMounts" . | indent 8 }} env: - name: POD_NAMESPACE valueFrom: @@ -175,10 +141,10 @@ spec: {{- if .Values.controller.extraContainers }} {{ toYaml .Values.controller.extraContainers | nindent 6 }} {{- end }} -{{- if or .Values.controller.readOnlyRootFilesystem .Values.controller.initContainers }} +{{- if or ( eq (include "nginx-ingress.readOnlyRootFilesystem" .) "true" ) .Values.controller.initContainers }} initContainers: {{- end }} -{{- if .Values.controller.readOnlyRootFilesystem }} +{{- if eq (include "nginx-ingress.readOnlyRootFilesystem" .) "true" }} - name: init-{{ include "nginx-ingress.name" . }} image: {{ include "nginx-ingress.image" . 
}} imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" @@ -187,6 +153,10 @@ spec: resources: {{ toYaml .Values.controller.initContainerResources | indent 10 }} {{- end }} +{{- if .Values.controller.initContainerSecurityContext }} + securityContext: +{{ toYaml .Values.controller.initContainerSecurityContext | indent 10 }} +{{- else }} securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -195,6 +165,7 @@ spec: capabilities: drop: - ALL +{{- end }} volumeMounts: - mountPath: /mnt/etc name: nginx-etc diff --git a/charts/f5/nginx-ingress/values.schema.json b/charts/f5/nginx-ingress/values.schema.json index 2b0564072..36ec62f77 100644 --- a/charts/f5/nginx-ingress/values.schema.json +++ b/charts/f5/nginx-ingress/values.schema.json @@ -46,13 +46,13 @@ "type": "object", "default": {}, "title": "The selectorLabels Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector/properties/matchLabels" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector/properties/matchLabels" }, "annotations": { "type": "object", "default": {}, "title": "The annotations Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" }, "nginxplus": { "type": "boolean", @@ -63,6 +63,37 @@ true ] }, + "debug": { + "type": "object", + "default": {}, + "title": "Runs the container with Delve, expects a version of the IC container with dlv as the entrypoint", + "properties": { + "enable": { + "type": "boolean", + "default": false, + "title": "Runs the container with Delve, expects a version of the IC container with dlv as the entrypoint", + "examples": [ + false, + true + ] + }, + "continue": { + "type": "boolean", + "default": true, + "title": "Starts Delve with --continue which means that IC will not wait for a debugger attach to start", + "examples": [ + false, + true + ] + } + }, + "examples": [ + { + "enable": true, + "continue": "fatal" + } + ] + }, "nginxReloadTimeout": { "type": "integer", "default": 0, @@ -195,7 +226,7 @@ "^.*$": { "anyOf": [ { - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort/properties/hostPort" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort/properties/hostPort" }, { "type": "boolean" @@ -211,7 +242,7 @@ "title": "The containerPort Schema", "patternProperties": { "^.*$": { - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort/properties/containerPort" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort/properties/containerPort" } }, "additionalProperties": false @@ -220,7 +251,7 @@ "type": "string", "allOf": [ { - "$ref": 
"https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/dnsPolicy" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/dnsPolicy" }, { "enum": [ @@ -270,7 +301,7 @@ "title": "The customPorts to expose on the NGINX Ingress Controller pod", "items": { "type": "object", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort" }, "examples": [ [ @@ -325,7 +356,7 @@ "title": "The pullPolicy for the Ingress Controller image", "allOf": [ { - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Container/properties/imagePullPolicy" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Container/properties/imagePullPolicy" }, { "enum": [ @@ -354,7 +385,7 @@ "type": "object", "default": {}, "title": "The lifecycle Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Lifecycle" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Lifecycle" }, "customConfigMap": { "type": "string", @@ -382,7 +413,7 @@ "type": "object", "default": {}, "title": "The annotations Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" }, "entries": { "type": "object", @@ -469,25 +500,43 @@ "type": "object", "default": {}, "title": "The nodeSelector Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/nodeSelector" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/nodeSelector" }, "terminationGracePeriodSeconds": { "type": "integer", "default": 30, "title": "The terminationGracePeriodSeconds Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/terminationGracePeriodSeconds" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/terminationGracePeriodSeconds" + }, + "podSecurityContext": { + "type": "object", + "default": {}, + "title": "The podSecurityContext Schema", + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSecurityContext" + }, + "securityContext": { + "type": "object", + "default": {}, + "title": "The securityContext Schema", + 
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.SecurityContext" + }, + "initContainerSecurityContext": { + "type": "object", + "default": {}, + "title": "The initContainerSecurityContext Schema", + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.SecurityContext" }, "resources": { "type": "object", "default": {}, "title": "The resources Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ResourceRequirements" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, "initContainerResources": { "type": "object", "default": {}, "title": "The resources Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ResourceRequirements" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, "tolerations": { "type": "array", @@ -495,20 +544,20 @@ "title": "The tolerations Schema", "items": { "type": "object", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Toleration" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Toleration" } }, "affinity": { "type": "object", "default": {}, "title": "The affinity Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Affinity" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Affinity" }, "topologySpreadConstraints": { "type": "object", "default": {}, "title": "The topologySpreadConstraints Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/topologySpreadConstraints" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/topologySpreadConstraints" }, "env": { "type": "array", @@ -516,7 +565,7 @@ "title": "The env Schema", "items": { "type": "object", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.EnvVar" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.EnvVar" } }, "volumes": { @@ -525,7 +574,7 @@ "title": "The volumes Schema", "items": { "type": "object", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Volume" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Volume" } }, "volumeMounts": { @@ -534,7 +583,7 @@ "title": "The volumeMounts Schema", "items": { "type": "object", - "$ref": 
"https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.VolumeMount" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.VolumeMount" } }, "initContainers": { @@ -543,14 +592,14 @@ "title": "The initContainers Schema", "items": { "type": "object", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Container" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Container" } }, "minReadySeconds": { "type": "integer", "default": 0, "title": "The minReadySeconds Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentSpec/properties/minReadySeconds" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentSpec/properties/minReadySeconds" }, "strategy": { "type": "object", @@ -558,7 +607,7 @@ "title": "The strategy Schema", "allOf": [ { - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentStrategy" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentStrategy" }, { "properties": { @@ -580,7 +629,7 @@ "title": "The extraContainers Schema", "items": { "type": "object", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Container" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Container" } }, "replicaCount": { @@ -848,19 +897,19 @@ "type": "string", "default": "", "title": "The type", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/type" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/type" }, "externalTrafficPolicy": { "type": "string", "default": "", "title": "The externalTrafficPolicy", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalTrafficPolicy" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalTrafficPolicy" }, "annotations": { "type": "object", "default": {}, "title": "The annotations", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" }, "extraLabels": { "type": "object", @@ -876,13 +925,13 @@ "type": "string", "default": "", "title": "The loadBalancerIP", - "$ref": 
"https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/loadBalancerIP" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/loadBalancerIP" }, "externalIPs": { "type": "array", "default": [], "title": "The externalIPs", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalIPs" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalIPs" }, "loadBalancerSourceRanges": { "type": "array", @@ -897,13 +946,13 @@ "type": "boolean", "default": false, "title": "The allocateLoadBalancerNodePorts Schema", - "ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/allocateLoadBalancerNodePorts" + "ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/allocateLoadBalancerNodePorts" }, "ipFamilyPolicy": { "type": "string", "default": "", "title": "The ipFamilyPolicy Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilyPolicy", + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilyPolicy", "examples": [ "" ] @@ -912,7 +961,7 @@ "type": "array", "default": [], "title": "The ipFamilies Schema", - "ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilies" + "ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilies" }, "httpPort": { "type": "object", @@ -1016,7 +1065,7 @@ "title": "The customPorts", "items": { "type": "object", - "ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServicePort" + "ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.ServicePort" } } }, @@ -1058,7 +1107,7 @@ "type": "object", "default": {}, "title": "The annotations Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" }, "name": { "type": "string", @@ -1203,7 +1252,7 @@ "type": "object", "default": {}, "title": "The annotations Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" + "$ref": 
"https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" } }, "examples": [ @@ -1227,13 +1276,13 @@ "type": "object", "default": {}, "title": "The annotations Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" }, "extraLabels": { "type": "object", "default": {}, "title": "The extraLabels Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels" } }, "examples": [ @@ -1247,7 +1296,7 @@ "type": "string", "default": "", "title": "The priorityClassName", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/priorityClassName" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/priorityClassName" }, "podDisruptionBudget": { "type": "object", @@ -1264,13 +1313,13 @@ "type": "object", "default": {}, "title": "The annotations Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations" }, "minAvailable": { - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/minAvailable" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/minAvailable" }, "maxUnavailable": { - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/maxUnavailable" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/maxUnavailable" } }, "examples": [ @@ -1309,7 +1358,7 @@ "initialDelaySeconds": { "type": "integer", "default": 0, - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.api.core.v1.Probe/properties/initialDelaySeconds" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.api.core.v1.Probe/properties/initialDelaySeconds" } }, "examples": [ @@ -1367,6 +1416,30 @@ "examples": [ true ] + }, + "telemetryReporting": { + "type": "object", + 
"default": {}, + "title": "Configure telemetry reporting options", + "required": [], + "properties": { + "enable": { + "type": "boolean", + "default": true, + "title": "Enable telemetry reporting", + "examples": [ + true + ] + } + } + }, + "enableWeightChangesDynamicReload": { + "type": "boolean", + "default": false, + "title": "Enables weight changes without reloading for NGINX Plus", + "examples": [ + false + ] } }, "examples": [ @@ -1416,6 +1489,13 @@ }, "nodeSelector": {}, "terminationGracePeriodSeconds": 30, + "podSecurityContext": { + "seccompProfile": { + "type": "RuntimeDefault" + } + }, + "securityContext": {}, + "initContainerSecurityContext": {}, "resources": { "requests": { "cpu": "100m", @@ -1601,7 +1681,7 @@ "type": "object", "default": {}, "title": "The labels Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels" } } }, @@ -1623,13 +1703,13 @@ "type": "object", "default": {}, "title": "The labels Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels" }, "selectorMatchLabels": { "type": "object", "default": {}, "title": "The selectorMatchLabels Schema", - "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.28.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector/properties/matchLabels" + "$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.29.0/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector/properties/matchLabels" }, "endpoints": { "type": "array", @@ -1739,6 +1819,168 @@ "enableEgress": false } ] + }, + "nginxAgent": { + "type": "object", + "default": { + "enable": false + }, + "title": "Configuration for NGINX Agent.", + "required": [ + "enable" + ], + "properties": { + "enable": { + "type": "boolean", + "default": false, + "title": "Enable NGINX Agent", + "examples": [ + false + ] + }, + "instanceGroup": { + "type": "string", + "default": "", + "title": "Set the --instance-group argument for NGINX Agent", + "examples": [ + "my-instance-group" + ] + }, + "logLevel": { + "type": "string", + "default": "info", + "title": "Log level for NGINX Agent", + "enum": [ + "panic", + "fatal", + "error", + "info", + "debug", + "trace" + ], + "examples": [ + "error" + ] + }, + "instanceManager": { + "type": "object", + "default": {}, + "title": "Configuration for the connection to NGINX Instance Manager", + "examples": [], + "required": [ + "host" + ], + "properties": { + "host": { + "type": "string", + "title": "FQDN or IP for connecting to NGINX Instance Manager", + "examples": [ + "nim.example.com" + ] + }, + "grpcPort": { + "type": "integer", + "title": "Port for connecting to NGINX Instance Manager", + "default": 443, + "examples": [ + 443 + ] + }, + "sni": { + "type": "string", + "title": "Server Name Indication for NGINX Instance Manager", + "default": "", + "examples": [ + "nim.example.com" + ] + 
}, + "tls": { + "type": "object", + "default": {}, + "title": "TLS configuration for connection between NGINX Agent and NGINX Instance Manager", + "properties": { + "enable": { + "type": "boolean", + "default": "true", + "title": "enable TLS for NGINX Instance Manager connection" + }, + "secret": { + "type": "string", + "default": "", + "title": "kubernetes.io/tls secret with a TLS certificate and key for using mTLS between NGINX Agent and NGINX Instance Manager" + }, + "caSecret": { + "type": "string", + "default": "", + "title": "nginx.org/ca secret for verification of Instance Manager TLS" + }, + "skipVerify": { + "type": "boolean", + "default": "false", + "title": "skip certificate verification" + } + } + } + } + }, + "syslog": { + "type": "object", + "default": { + "host": "127.0.0.1", + "port": 1514 + }, + "title": "Syslog listener which NGINX Agent uses to accept messages from App Protect WAF", + "properties": { + "host": { + "type": "string", + "title": "Address for NGINX Agent to run syslog listener", + "default": "127.0.0.1", + "examples": [ + "127.0.0.1" + ] + }, + "port": { + "type": "integer", + "title": "Port for NGINX Agent to run syslog listener", + "default": 1514, + "examples": [ + 1514 + ] + } + } + }, + "napMonitoring": { + "type": "object", + "default": {}, + "title": "NGINX App Protect Monitoring config", + "properties": { + "collectorBufferSize": { + "type": "integer", + "default": 50000, + "title": "Buffer size for collector. Will contain log lines and parsed log lines", + "examples": [ + 50000 + ] + }, + "processorBufferSize": { + "type": "integer", + "default": 50000, + "title": "Buffer size for processor. Will contain log lines and parsed log lines", + "examples": [ + 50000 + ] + } + } + }, + "customConfigMap": { + "type": "string", + "title": "The name of a custom ConfigMap to use instead of the one provided by default", + "default": "", + "examples": [ + "my-custom-configmap" + ] + } + } } }, "examples": [ diff --git a/charts/f5/nginx-ingress/values.yaml b/charts/f5/nginx-ingress/values.yaml index fe85c8a35..d78eb7705 100644 --- a/charts/f5/nginx-ingress/values.yaml +++ b/charts/f5/nginx-ingress/values.yaml @@ -79,7 +79,6 @@ controller: ## The tag of the Ingress Controller image. If not specified the appVersion from Chart.yaml is used as a tag. # tag: "3.4.3" - ## The digest of the Ingress Controller image. ## If digest is specified it has precedence over tag and will be used instead # digest: "sha256:CHANGEME" @@ -168,6 +167,26 @@ controller: # cpu: 1 # memory: 1Gi + ## The security context for the Ingress Controller pods. + podSecurityContext: + seccompProfile: + type: RuntimeDefault + + ## The security context for the Ingress Controller containers. + securityContext: {} # Remove curly brackets before adding values + # allowPrivilegeEscalation: true + # readOnlyRootFilesystem: true + # runAsUser: 101 #nginx + # runAsNonRoot: true + # capabilities: + # drop: + # - ALL + # add: + # - NET_BIND_SERVICE + + ## The security context for the Ingress Controller init container which is used when readOnlyRootFilesystem is set to true. + initContainerSecurityContext: {} + ## The resources for the Ingress Controller init container which is used when readOnlyRootFilesystem is set to true. initContainerResources: requests: @@ -461,11 +480,23 @@ controller: defaultHTTPSListenerPort: 443 ## Configure root filesystem as read-only and add volumes for temporary data. + ## Three major releases after 3.5.x this argument will be moved to the `securityContext` section. 
+ ## This value will not be used if `controller.securityContext` is set readOnlyRootFilesystem: false ## Enable dynamic reloading of certificates enableSSLDynamicReload: true + ## Configure telemetry reporting options + telemetryReporting: + ## Enable telemetry reporting + enable: true + + ## Allows weight adjustments without reloading the NGINX Configuration for two-way splits in NGINX Plus. + ## May require increasing map_hash_bucket_size, map_hash_max_size, + ## variable_hash_bucket_size, and variable_hash_max_size in the ConfigMap based on the number of two-way splits. + enableWeightChangesDynamicReload: false + rbac: ## Configures RBAC. create: true @@ -527,3 +558,31 @@ nginxServiceMesh: ## Enables NGINX Service Mesh workload to route egress traffic through the Ingress Controller. ## Requires nginxServiceMesh.enable enableEgress: false + +nginxAgent: + ## Enables NGINX Agent. + enable: false + ## If nginxAgent.instanceGroup is not set the value of nginx-ingress.controller.fullname will be used + instanceGroup: "" + logLevel: "error" + ## Syslog listener which NGINX Agent uses to accept messages from App Protect WAF + syslog: + host: "127.0.0.1" + port: 1514 + napMonitoring: + collectorBufferSize: 50000 + processorBufferSize: 50000 + instanceManager: + # FQDN or IP for connecting to NGINX Instance Manager, e.g. nim.example.com + host: "" + grpcPort: 443 + sni: "" + tls: + enabled: true + skipVerify: false + ## kubernetes.io/tls secret with a TLS certificate and key for using mTLS between NGINX Agent and Instance Manager + secret: "" + ## nginx.org/ca secret for verification of Instance Manager TLS + caSecret: "" + ## The name of a custom ConfigMap to use instead of the one provided by default + customConfigMap: "" diff --git a/charts/fairwinds/polaris/Chart.yaml b/charts/fairwinds/polaris/Chart.yaml index b525554ae..e9b543489 100644 --- a/charts/fairwinds/polaris/Chart.yaml +++ b/charts/fairwinds/polaris/Chart.yaml @@ -12,4 +12,4 @@ maintainers: - email: robertb@fairwinds.com name: rbren name: polaris -version: 5.17.0 +version: 5.17.1 diff --git a/charts/fairwinds/polaris/README.md b/charts/fairwinds/polaris/README.md index 3c6637a1b..d58a660d2 100644 --- a/charts/fairwinds/polaris/README.md +++ b/charts/fairwinds/polaris/README.md @@ -54,7 +54,7 @@ the 0.10.0 version of this chart will only work on kubernetes 1.14.0+ | dashboard.logLevel | string | `"Info"` | Set the logging level for the Dashboard command | | dashboard.podAdditionalLabels | object | `{}` | Custom additional labels on dashboard pods. | | dashboard.deploymentAnnotations | object | `{}` | Custom additional annotations on dashboard Deployment. | -| dashboard.resources | object | `{"limits":{"cpu":"150m","memory":"512Mi"},"requests":{"cpu":"100m","memory":"128Mi"}}` | Requests and limits for the dashboard | +| dashboard.resources | object | `{"limits":{},"requests":{"cpu":"100m","memory":"128Mi"}}` | Requests and limits for the dashboard | | dashboard.extraContainers | list | `[]` | allows injecting additional containers. 
| | dashboard.service.type | string | `"ClusterIP"` | Service Type | | dashboard.service.annotations | object | `{}` | Service annotations | @@ -97,7 +97,7 @@ the 0.10.0 version of this chart will only work on kubernetes 1.14.0+ | webhook.defaultRules | list | `[{"apiGroups":["apps"],"apiVersions":["v1","v1beta1","v1beta2"],"operations":["CREATE","UPDATE"],"resources":["daemonsets","deployments","statefulsets"],"scope":"Namespaced"},{"apiGroups":["batch"],"apiVersions":["v1","v1beta1"],"operations":["CREATE","UPDATE"],"resources":["jobs","cronjobs"],"scope":"Namespaced"},{"apiGroups":[""],"apiVersions":["v1"],"operations":["CREATE","UPDATE"],"resources":["pods","replicationcontrollers"],"scope":"Namespaced"}]` | An array of rules for common types for the ValidatingWebhookConfiguration | | webhook.podAdditionalLabels | object | `{}` | Custom additional labels on webhook pods. | | webhook.deploymentAnnotations | object | `{}` | Custom additional annotations on webhook Deployment. | -| webhook.resources | object | `{"limits":{"cpu":"100m","memory":"128Mi"},"requests":{"cpu":"100m","memory":"128Mi"}}` | Requests and limits for the webhook. | +| webhook.resources | object | `{"limits":{},"requests":{"cpu":"100m","memory":"128Mi"}}` | Requests and limits for the webhook. | | webhook.priorityClassName | string | `nil` | Priority Class name to be used in deployment if provided. | | webhook.disallowExemptions | bool | `false` | Disallow any exemption | | webhook.disallowConfigExemptions | bool | `false` | Disallow exemptions that are configured in the config file | diff --git a/charts/fairwinds/polaris/values.yaml b/charts/fairwinds/polaris/values.yaml index 27d2f4100..209ebb1cd 100644 --- a/charts/fairwinds/polaris/values.yaml +++ b/charts/fairwinds/polaris/values.yaml @@ -54,9 +54,7 @@ dashboard: requests: cpu: 100m memory: 128Mi - limits: - cpu: 150m - memory: 512Mi + limits: {} # dashboard.extraContainers -- allows injecting additional containers. extraContainers: [] # extraContainers: @@ -234,9 +232,7 @@ webhook: requests: cpu: 100m memory: 128Mi - limits: - cpu: 100m - memory: 128Mi + limits: {} # webhook.priorityClassName -- Priority Class name to be used in deployment if provided. 
priorityClassName: # webhook.disallowExemptions -- Disallow any exemption diff --git a/charts/hashicorp/consul/Chart.yaml b/charts/hashicorp/consul/Chart.yaml index 481e83302..f7e484b42 100644 --- a/charts/hashicorp/consul/Chart.yaml +++ b/charts/hashicorp/consul/Chart.yaml @@ -1,11 +1,11 @@ annotations: artifacthub.io/images: | - name: consul - image: hashicorp/consul:1.18.0 + image: hashicorp/consul:1.18.1 - name: consul-k8s-control-plane - image: hashicorp/consul-k8s-control-plane:1.4.0 + image: hashicorp/consul-k8s-control-plane:1.4.1 - name: consul-dataplane - image: hashicorp/consul-dataplane:1.4.0 + image: hashicorp/consul-dataplane:1.4.1 - name: envoy image: envoyproxy/envoy:v1.25.11 artifacthub.io/license: MPL-2.0 @@ -25,7 +25,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.22.0-0' catalog.cattle.io/release-name: consul apiVersion: v2 -appVersion: 1.18.0 +appVersion: 1.18.1 description: Official HashiCorp Consul Chart home: https://www.consul.io icon: https://raw.githubusercontent.com/hashicorp/consul-k8s/main/assets/icon.png @@ -34,4 +34,4 @@ name: consul sources: - https://github.com/hashicorp/consul - https://github.com/hashicorp/consul-k8s -version: 1.4.0 +version: 1.4.1 diff --git a/charts/hashicorp/consul/templates/crd-gatewayclassconfigs-v1.yaml b/charts/hashicorp/consul/templates/crd-gatewayclassconfigs-v1.yaml index 130db72a2..41023c19d 100644 --- a/charts/hashicorp/consul/templates/crd-gatewayclassconfigs-v1.yaml +++ b/charts/hashicorp/consul/templates/crd-gatewayclassconfigs-v1.yaml @@ -131,6 +131,23 @@ spec: for gateway containers format: int32 type: integer + metrics: + description: Metrics defines how to configure the metrics for a gateway. + properties: + enabled: + description: Enable metrics for this class of gateways. If unspecified, + will inherit behavior from the global Helm configuration. + type: boolean + path: + description: The path used for metrics. + type: string + port: + description: The port used for metrics. + format: int32 + maximum: 65535 + minimum: 1024 + type: integer + type: object nodeSelector: additionalProperties: type: string diff --git a/charts/hashicorp/consul/templates/crd-terminatinggateways.yaml b/charts/hashicorp/consul/templates/crd-terminatinggateways.yaml index 565aa6338..cd53122e9 100644 --- a/charts/hashicorp/consul/templates/crd-terminatinggateways.yaml +++ b/charts/hashicorp/consul/templates/crd-terminatinggateways.yaml @@ -73,6 +73,10 @@ spec: to use for TLS connections from the gateway to the linked service. type: string + disableAutoHostRewrite: + description: DisableAutoHostRewrite disables terminating gateways + auto host rewrite feature when set to true. + type: boolean keyFile: description: KeyFile is the optional path to a private key to use for TLS connections from the gateway to the linked service. 
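The `disableAutoHostRewrite` boolean added to the terminating-gateways CRD in the hunk above is per linked service. A minimal sketch of how it could appear on a resource follows; the apiVersion and service name are assumptions for illustration and are not taken from this patch, only the field and its meaning come from the CRD change.

```yaml
# Sketch only: TerminatingGateway using the new disableAutoHostRewrite field.
# apiVersion and the service name are assumed for illustration.
apiVersion: consul.hashicorp.com/v1alpha1
kind: TerminatingGateway
metadata:
  name: terminating-gateway
spec:
  services:
    - name: example-external-service
      # When true, disables the terminating gateway's automatic host rewrite
      # for this linked service (see the CRD description above).
      disableAutoHostRewrite: true
```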
diff --git a/charts/hashicorp/consul/templates/gateway-resources-job.yaml b/charts/hashicorp/consul/templates/gateway-resources-job.yaml index 5934372ed..ead22833e 100644 --- a/charts/hashicorp/consul/templates/gateway-resources-job.yaml +++ b/charts/hashicorp/consul/templates/gateway-resources-job.yaml @@ -101,7 +101,16 @@ spec: - -openshift-scc-name={{ .Values.connectInject.apiGateway.managedGatewayClass.openshiftSCCName }} {{- end }} - -map-privileged-container-ports={{ .Values.connectInject.apiGateway.managedGatewayClass.mapPrivilegedContainerPorts }} - {{- end}} + {{- if (ne (.Values.connectInject.apiGateway.managedGatewayClass.metrics.enabled | toString) "-") }} + - -enable-metrics={{ .Values.connectInject.apiGateway.managedGatewayClass.metrics.enabled | toString }} + {{- end }} + {{- if .Values.connectInject.apiGateway.managedGatewayClass.metrics.path }} + - -metrics-path={{ .Values.connectInject.apiGateway.managedGatewayClass.metrics.path }} + {{- end }} + {{- if .Values.connectInject.apiGateway.managedGatewayClass.metrics.port }} + - -metrics-port={{ .Values.connectInject.apiGateway.managedGatewayClass.metrics.port }} + {{- end }} + {{- end }} resources: requests: memory: "50Mi" diff --git a/charts/hashicorp/consul/templates/sync-catalog-clusterrole.yaml b/charts/hashicorp/consul/templates/sync-catalog-clusterrole.yaml index 585b5ad17..89ea9f3c5 100644 --- a/charts/hashicorp/consul/templates/sync-catalog-clusterrole.yaml +++ b/charts/hashicorp/consul/templates/sync-catalog-clusterrole.yaml @@ -14,7 +14,19 @@ rules: - apiGroups: [ "" ] resources: - services - - endpoints + verbs: + - get + - list + - watch +{{- if .Values.syncCatalog.toK8S }} + - update + - patch + - delete + - create +{{- end }} +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices verbs: - get - list @@ -45,4 +57,4 @@ rules: - get - list - watch -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/hashicorp/consul/values.yaml b/charts/hashicorp/consul/values.yaml index 2bad84077..42c5cc95d 100644 --- a/charts/hashicorp/consul/values.yaml +++ b/charts/hashicorp/consul/values.yaml @@ -66,7 +66,7 @@ global: # image: "hashicorp/consul-enterprise:1.10.0-ent" # ``` # @default: hashicorp/consul: - image: hashicorp/consul:1.18.0 + image: hashicorp/consul:1.18.1 # Array of objects containing image pull secret names that will be applied to each service account. # This can be used to reference image pull secrets if using a custom consul or consul-k8s-control-plane Docker image. @@ -86,7 +86,7 @@ global: # image that is used for functionality such as catalog sync. # This can be overridden per component. # @default: hashicorp/consul-k8s-control-plane: - imageK8S: hashicorp/consul-k8s-control-plane:1.4.0 + imageK8S: hashicorp/consul-k8s-control-plane:1.4.1 # The name of the datacenter that the agents should # register as. This can't be changed once the Consul cluster is up and running @@ -623,7 +623,7 @@ global: # @type: boolean disableAgentHostName: false - # Configures consul agent underlying host metrics. Only applicable if + # Configures consul agent underlying host metrics. Default is false. # Only applicable if `global.metrics.enabled` and `global.metrics.enableAgentMetrics` is true. 
# @type: boolean enableHostMetrics: false @@ -646,14 +646,12 @@ global: # @type: boolean enableTelemetryCollector: false - # This configures the list of filter rules to apply for allowing/blocking + # Configures the list of filter rules to apply for allowing or blocking # metrics by prefix in the following format: # # A leading "+" will enable any metrics with the given prefix, and a leading "-" will block them. # If there is overlap between two rules, the more specific rule will take precedence. # Blocking will take priority if the same prefix is listed multiple times. - # - # - allowList: prefixFilter: # @type: array allowList: [] @@ -693,8 +691,7 @@ global: # Configures Kubernetes Prometheus/OpenMetrics auto-discovery annotations for use with Datadog. # This configuration is less common and more for advanced usage with custom metrics monitoring - # configurations. See https://docs.datadoghq.com/containers/kubernetes/prometheus/?tab=kubernetesadv2 for more details - # surround further configuration. + # configurations. Refer to the [Datadog documentation](https://docs.datadoghq.com/containers/kubernetes/prometheus/?tab=kubernetesadv2) for more details. openMetricsPrometheus: # @default: false # @type: boolean @@ -791,7 +788,7 @@ global: # The name (and tag) of the consul-dataplane Docker image used for the # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. # @default: hashicorp/consul-dataplane: - imageConsulDataplane: hashicorp/consul-dataplane:1.4.0 + imageConsulDataplane: hashicorp/consul-dataplane:1.4.1 # Configuration for running this Helm chart on the Red Hat OpenShift platform. # This Helm chart currently supports OpenShift v4.x+. @@ -905,14 +902,14 @@ global: # Consul feature flags that will be enabled across components. # Supported feature flags: - # * `resource-apis`: - # _**Danger**_! This feature is under active development. It is not + # - `resource-apis`: + # _**Warning**_! This feature is under active development. It is not # recommended for production use. Setting this flag during an # upgrade could risk breaking your Consul cluster. # If this flag is set, Consul components will use the # V2 resources APIs for all operations. - # * `v2tenancy`: - # _**Danger**_! This feature is under active development. It is not + # - `v2tenancy`: + # _**Warning**_! This feature is under active development. It is not # recommended for production use. Setting this flag during an # upgrade could risk breaking your Consul cluster. # If this flag is set, Consul V2 resources (catalog, mesh, auth, etc) @@ -1164,7 +1161,7 @@ server: enabled: true # The maximum number of unavailable pods. In most cases you should not change this as it is automatically set to - # the correct number when left as null. This setting has been kept to not break backwards compatibility. + # the correct number when left as null. This setting has been kept to preserve backwards compatibility. # # By default, this is set to 1 internally in the chart. When server pods are stopped gracefully, they leave the Raft # consensus pool. When running an odd number of servers, one server leaving the pool does not change the quorum @@ -2420,6 +2417,19 @@ connectInject: # @type: string service: null + # Metrics settings for gateways created with this gateway class configuration. + metrics: + # This value enables or disables metrics collection on a gateway, overriding the global gateway metrics collection settings. 
+ # @type: boolean + enabled: "-" + # This value sets the port to use for scraping gateway metrics via prometheus, defaults to 20200 if not set. Must be in the port + # range of 1024-65535. + # @type: int + port: null + # This value sets the path to use for scraping gateway metrics via prometheus, defaults to /metrics if not set. + # @type: string + path: null + # The resource settings for Pods handling traffic for Gateway API. # @recurse: false # @type: map diff --git a/charts/jenkins/jenkins/CHANGELOG.md b/charts/jenkins/jenkins/CHANGELOG.md index aae963528..7fb10f603 100644 --- a/charts/jenkins/jenkins/CHANGELOG.md +++ b/charts/jenkins/jenkins/CHANGELOG.md @@ -12,6 +12,10 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0. The changelog until v1.5.7 was auto-generated based on git commits. Those entries include a reference to the git commit to be able to get more details. +## 5.1.5 + +Fix Prometheus controller name. + ## 5.1.4 Update `docker.io/bats/bats` to version `1.11.0` diff --git a/charts/jenkins/jenkins/Chart.yaml b/charts/jenkins/jenkins/Chart.yaml index e27a284cc..5b3377402 100644 --- a/charts/jenkins/jenkins/Chart.yaml +++ b/charts/jenkins/jenkins/Chart.yaml @@ -1,7 +1,7 @@ annotations: artifacthub.io/category: integration-delivery artifacthub.io/changes: | - - Update `docker.io/bats/bats` to version `1.11.0` + - Fix Prometheus controller name. artifacthub.io/images: | - name: jenkins image: docker.io/jenkins/jenkins:2.440.2-jdk17 @@ -50,4 +50,4 @@ sources: - https://github.com/maorfr/kube-tasks - https://github.com/jenkinsci/configuration-as-code-plugin type: application -version: 5.1.4 +version: 5.1.5 diff --git a/charts/jenkins/jenkins/README.md b/charts/jenkins/jenkins/README.md index df29e0b22..4ddd1faa4 100644 --- a/charts/jenkins/jenkins/README.md +++ b/charts/jenkins/jenkins/README.md @@ -609,10 +609,10 @@ controller: If you want to expose Prometheus metrics you need to install the [Jenkins Prometheus Metrics Plugin](https://github.com/jenkinsci/prometheus-plugin). It will expose an endpoint (default `/prometheus`) with metrics where a Prometheus Server can scrape. -If you have implemented [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), you can set `master.prometheus.enabled` to `true` to configure a `ServiceMonitor` and `PrometheusRule`. -If you want to further adjust alerting rules you can do so by configuring `master.prometheus.alertingrules` +If you have implemented [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), you can set `controller.prometheus.enabled` to `true` to configure a `ServiceMonitor` and `PrometheusRule`. +If you want to further adjust alerting rules you can do so by configuring `controller.prometheus.alertingrules` -If you have implemented Prometheus without using the operator, you can leave `master.prometheus.enabled` set to `false`. +If you have implemented Prometheus without using the operator, you can leave `controller.prometheus.enabled` set to `false`. 
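The README correction above moves the documented keys from `master.prometheus.*` to `controller.prometheus.*`. As a quick reference, a minimal values sketch using the corrected keys might look like the following; it assumes a Prometheus Operator installation, and the empty `alertingrules` list is only a placeholder.

```yaml
# Sketch of values enabling the ServiceMonitor/PrometheusRule described above,
# using the corrected controller.prometheus.* keys. Placeholder values only.
controller:
  prometheus:
    enabled: true        # creates a ServiceMonitor and a PrometheusRule
    alertingrules: []    # add custom alerting rule entries here if needed
```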
### Running Behind a Forward Proxy diff --git a/charts/jfrog/artifactory-ha/CHANGELOG.md b/charts/jfrog/artifactory-ha/CHANGELOG.md index 0391e5207..31a8e0632 100644 --- a/charts/jfrog/artifactory-ha/CHANGELOG.md +++ b/charts/jfrog/artifactory-ha/CHANGELOG.md @@ -1,7 +1,7 @@ # JFrog Artifactory-ha Chart Changelog All changes to this chart will be documented in this file -## [107.77.7] - Feb 20, 2024 +## [107.77.8] - Feb 20, 2024 * Removed integration service * Added recommended postgresql sizing configurations under sizing directory * Updated artifactory-federation (probes, port, embedded mode) diff --git a/charts/jfrog/artifactory-ha/Chart.yaml b/charts/jfrog/artifactory-ha/Chart.yaml index 75cefc785..40838ddf6 100644 --- a/charts/jfrog/artifactory-ha/Chart.yaml +++ b/charts/jfrog/artifactory-ha/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.19.0-0' catalog.cattle.io/release-name: artifactory-ha apiVersion: v2 -appVersion: 7.77.7 +appVersion: 7.77.8 dependencies: - condition: postgresql.enabled name: postgresql @@ -26,4 +26,4 @@ name: artifactory-ha sources: - https://github.com/jfrog/charts type: application -version: 107.77.7 +version: 107.77.8 diff --git a/charts/jfrog/artifactory-jcr/CHANGELOG.md b/charts/jfrog/artifactory-jcr/CHANGELOG.md index 774daedf1..27c983aad 100644 --- a/charts/jfrog/artifactory-jcr/CHANGELOG.md +++ b/charts/jfrog/artifactory-jcr/CHANGELOG.md @@ -1,7 +1,7 @@ # JFrog Container Registry Chart Changelog All changes to this chart will be documented in this file. -## [107.77.7] - Nov 23, 2023 +## [107.77.8] - Nov 23, 2023 * **IMPORTANT** * Added min kubeVersion ">= 1.19.0-0" in chart.yaml diff --git a/charts/jfrog/artifactory-jcr/Chart.yaml b/charts/jfrog/artifactory-jcr/Chart.yaml index 5f80e9877..f8503ef8c 100644 --- a/charts/jfrog/artifactory-jcr/Chart.yaml +++ b/charts/jfrog/artifactory-jcr/Chart.yaml @@ -4,11 +4,11 @@ annotations: catalog.cattle.io/kube-version: '>= 1.19.0-0' catalog.cattle.io/release-name: artifactory-jcr apiVersion: v2 -appVersion: 7.77.7 +appVersion: 7.77.8 dependencies: - name: artifactory repository: file://./charts/artifactory - version: 107.77.7 + version: 107.77.8 description: JFrog Container Registry home: https://jfrog.com/container-registry/ icon: https://raw.githubusercontent.com/jfrog/charts/ea5c3112c24a973f64f3ccd99747323db292a369/stable/artifactory-jcr/logo/jcr-logo.png @@ -27,4 +27,4 @@ name: artifactory-jcr sources: - https://github.com/jfrog/charts type: application -version: 107.77.7 +version: 107.77.8 diff --git a/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md b/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md index 45cdb8e74..92196917d 100644 --- a/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md +++ b/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md @@ -1,7 +1,7 @@ # JFrog Artifactory Chart Changelog All changes to this chart will be documented in this file. 
-## [107.77.7] - Feb 20, 2024 +## [107.77.8] - Feb 20, 2024 * Removed integration service * Added recommended postgresql sizing configurations under sizing directory * Updated artifactory-federation (probes, port, embedded mode) diff --git a/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml b/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml index 62560d9fc..884038da8 100644 --- a/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml +++ b/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 7.77.7 +appVersion: 7.77.8 dependencies: - condition: postgresql.enabled name: postgresql @@ -21,4 +21,4 @@ name: artifactory sources: - https://github.com/jfrog/charts type: application -version: 107.77.7 +version: 107.77.8 diff --git a/charts/kasten/k10/Chart.lock b/charts/kasten/k10/Chart.lock index f09911268..a661c3790 100644 --- a/charts/kasten/k10/Chart.lock +++ b/charts/kasten/k10/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: "" version: 25.12.0 digest: sha256:f3e6926f6a711f61ab0e6598105cbee8806113bb02992529f05c3645fe99161c -generated: "2024-03-25T18:11:54.998934672Z" +generated: "2024-03-26T03:51:42.656422238Z" diff --git a/charts/kasten/k10/Chart.yaml b/charts/kasten/k10/Chart.yaml index d332e28a7..8b87cdbcb 100644 --- a/charts/kasten/k10/Chart.yaml +++ b/charts/kasten/k10/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.17.0-0' catalog.cattle.io/release-name: k10 apiVersion: v2 -appVersion: 6.5.9 +appVersion: 6.5.10 dependencies: - condition: grafana.enabled name: grafana @@ -21,4 +21,4 @@ maintainers: - email: contact@kasten.io name: kastenIO name: k10 -version: 6.5.901 +version: 6.5.1001 diff --git a/charts/kasten/k10/templates/_k10_image_tag.tpl b/charts/kasten/k10/templates/_k10_image_tag.tpl index 56d3c4e42..78fda8fc9 100644 --- a/charts/kasten/k10/templates/_k10_image_tag.tpl +++ b/charts/kasten/k10/templates/_k10_image_tag.tpl @@ -1 +1 @@ -{{- define "k10.imageTag" -}}6.5.9{{- end -}} \ No newline at end of file +{{- define "k10.imageTag" -}}6.5.10{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/Chart.yaml b/charts/kubecost/cost-analyzer/Chart.yaml index b4dde801a..0ad91823a 100644 --- a/charts/kubecost/cost-analyzer/Chart.yaml +++ b/charts/kubecost/cost-analyzer/Chart.yaml @@ -7,9 +7,9 @@ annotations: catalog.cattle.io/featured: "1" catalog.cattle.io/release-name: cost-analyzer apiVersion: v2 -appVersion: 2.1.1 +appVersion: 2.2.0 description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor cloud costs. icon: https://partner-charts.rancher.io/assets/logos/kubecost.png name: cost-analyzer -version: 2.1.1 +version: 2.2.0 diff --git a/charts/kubecost/cost-analyzer/README.md b/charts/kubecost/cost-analyzer/README.md index 3674e10a5..72da48c29 100644 --- a/charts/kubecost/cost-analyzer/README.md +++ b/charts/kubecost/cost-analyzer/README.md @@ -67,6 +67,8 @@ The following table lists commonly used configuration parameters for the Kubecos | `prometheusRule.enabled` | Set this to `true` to create PrometheusRule for Prometheus operator | `false` | | `prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | | `grafana.resources` | Grafana resource requests and limits. | `{}` | +| `grafana.serviceAccount.create` | If true, create a Service Account for Grafana. | `true` | +| `grafana.serviceAccount.name` | Grafana Service Account name. 
| `{}` | | `grafana.sidecar.datasources.defaultDatasourceEnabled` | Set this to `false` to disable creation of Prometheus datasource in Grafana | `true` | | `serviceAccount.create` | Set this to `false` if you want to create the service account `kubecost-cost-analyzer` on your own | `true` | | `tolerations` | node taints to tolerate | `[]` | diff --git a/charts/kubecost/cost-analyzer/ci/federatedetl-primary-netcosts-values.yaml b/charts/kubecost/cost-analyzer/ci/federatedetl-primary-netcosts-values.yaml index ef4f03856..6742df6f3 100644 --- a/charts/kubecost/cost-analyzer/ci/federatedetl-primary-netcosts-values.yaml +++ b/charts/kubecost/cost-analyzer/ci/federatedetl-primary-netcosts-values.yaml @@ -12,7 +12,6 @@ kubecostModel: federatedStorageConfigSecret: federated-store serviceAccount: # this example uses AWS IRSA, which creates a service account with rights to the s3 bucket. If using keys+secrets in the federated-store, set create: true create: true -kubecostDeployment: global: prometheus: enabled: true diff --git a/charts/kubecost/cost-analyzer/ci/statefulsets-cc.yaml b/charts/kubecost/cost-analyzer/ci/statefulsets-cc.yaml new file mode 100644 index 000000000..626a0c2e5 --- /dev/null +++ b/charts/kubecost/cost-analyzer/ci/statefulsets-cc.yaml @@ -0,0 +1,46 @@ +### This test is to verify that Kubecost aggregator is deployed as a StatefulSet, +### cluster controller is installed, and the various Prometheus components are installed. +global: + podAnnotations: + kubecost.io/test1: value1 + kubecost.io/test2: value2 + additionalLabels: + kubecosttest1: value1 + kubecosttest2: value2 + prometheus: + enabled: true + # fqdn: http://prometheus-operated.monitoring:9090 + grafana: # prometheus metrics will be local cluster only, disable grafana to save resources + enabled: false + proxy: false +kubecostProductConfigs: + clusterName: CLUSTER_NAME +kubecostAggregator: + deployMethod: statefulset +kubecostModel: + federatedStorageConfigSecret: federated-store +clusterController: + enabled: true + actionConfigs: + clusterTurndown: + - name: my-schedule2 + start: "2034-02-09T00:00:00Z" + end: "2034-02-09T01:00:00Z" + repeat: none +prometheus: + nodeExporter: + enabled: true + alertmanager: + enabled: true + configmapReload: + prometheus: + enabled: true + pushgateway: + enabled: true + server: + statefulSet: + enabled: true + global: + external_labels: + # cluster_id should be unique for all clusters and the same value as .kubecostProductConfigs.clusterName + cluster_id: CLUSTER_NAME \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/crds/cluster-turndown-crd.yaml b/charts/kubecost/cost-analyzer/crds/cluster-turndown-crd.yaml new file mode 100644 index 000000000..8c87644cc --- /dev/null +++ b/charts/kubecost/cost-analyzer/crds/cluster-turndown-crd.yaml @@ -0,0 +1,78 @@ +# TurndownSchedule Custom Resource Definition for persistence +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: turndownschedules.kubecost.com +spec: + group: kubecost.com + names: + kind: TurndownSchedule + singular: turndownschedule + plural: turndownschedules + shortNames: + - td + - tds + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + start: + type: string + format: date-time + end: + type: string + format: date-time + repeat: + type: string + enum: [none, daily, weekly] + status: + type: object + properties: + state: + 
type: string + lastUpdated: + format: date-time + type: string + current: + type: string + scaleDownId: + type: string + nextScaleDownTime: + format: date-time + type: string + scaleDownMetadata: + additionalProperties: + type: string + type: object + scaleUpID: + type: string + nextScaleUpTime: + format: date-time + type: string + scaleUpMetadata: + additionalProperties: + type: string + type: object + additionalPrinterColumns: + - name: State + type: string + description: The state of the turndownschedule + jsonPath: .status.state + - name: Next Turndown + type: string + description: The next turndown date-time + jsonPath: .status.nextScaleDownTime + - name: Next Turn Up + type: string + description: The next turn up date-time + jsonPath: .status.nextScaleUpTime diff --git a/charts/kubecost/cost-analyzer/old-grafana-values.yaml b/charts/kubecost/cost-analyzer/old-grafana-values.yaml deleted file mode 100644 index 1d5e1d026..000000000 --- a/charts/kubecost/cost-analyzer/old-grafana-values.yaml +++ /dev/null @@ -1,208 +0,0 @@ -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - type: ClusterIP - port: 80 - annotations: {} - labels: {} - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - pathType: Prefix - hosts: - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - enabled: false - # storageClassName: default - # accessModes: - # - ReadWriteOnce - # size: 10Gi - # annotations: {} - # subPath: "" - # existingClaim: - -adminUser: admin -adminPassword: strongpassword - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: - -## Extra environment variables that will be pass onto deployment pods -env: {} - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc -envFromSecret: "" - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - -## Pass the plugins you want installed as a list. 
-## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: {} -# dashboardproviders.yaml: -# apiVersion: 1 -# providers: -# - name: 'default' -# orgId: 1 -# folder: '' -# type: file -# disableDeletion: false -# editable: true -# options: -# path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. -## -dashboards: {} -# default: -# prometheus-stats: -# gnetId: 3662 -# revision: 2 -# datasource: Prometheus - -## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value. -## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. -## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -## -dashboardsConfigMaps: {} -# default: "" - -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. - existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana in keys `user` and `password`. 
- existingSecret: "" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: - repository: kiwigrid/k8s-sidecar - tag: 1.25.4 - pullPolicy: IfNotPresent - resources: {} - dashboards: - enabled: false - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # folder in the pod that should hold the collected dashboards - folder: /tmp/dashboards - datasources: - enabled: false - # label that the configmaps with datasources are marked with - label: grafana_datasource - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/data - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net - auth.anonymous: - enabled: true - org_role: Editor - org_name: Main Org. diff --git a/charts/kubecost/cost-analyzer/old-prometheus-values.yaml b/charts/kubecost/cost-analyzer/old-prometheus-values.yaml deleted file mode 100644 index bdd91396f..000000000 --- a/charts/kubecost/cost-analyzer/old-prometheus-values.yaml +++ /dev/null @@ -1,1352 +0,0 @@ -rbac: - create: true - -podSecurityPolicy: - enabled: false - -imagePullSecrets: -# - name: "image-pull-secret" - -## Define serviceAccount names for components. Defaults to component's fully qualified name. -## -serviceAccounts: - alertmanager: - create: true - name: - nodeExporter: - create: true - name: - pushgateway: - create: true - name: - server: - create: true - name: - ## Prometheus server ServiceAccount annotations. - ## Can be used for AWS IRSA annotations when using Remote Write mode with Amazon Managed Prometheus. - annotations: {} - -alertmanager: - ## If false, alertmanager will not be installed - ## - enabled: true - - strategy: - type: Recreate - rollingUpdate: null - - ## alertmanager container name - ## - name: alertmanager - - ## alertmanager container image - ## - image: - repository: quay.io/prometheus/alertmanager - tag: v0.26.0 - pullPolicy: IfNotPresent - - ## alertmanager priorityClassName - ## - priorityClassName: "" - - ## Additional alertmanager container arguments - ## - extraArgs: {} - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access alertmanager - baseURL: "http://localhost:9093" - - ## Additional alertmanager container environment variable - ## For instance to add a http_proxy - ## - extraEnv: {} - - ## Additional alertmanager Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: alertmanager-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config - ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configFromSecret: "" - - ## The configuration file name to be loaded to alertmanager - ## Must match the key within configuration loaded from ConfigMap/Secret - ## - configFileName: alertmanager.yml - - ingress: - ## If true, alertmanager Ingress will be created - ## - enabled: false - - ## alertmanager Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## alertmanager Ingress additional labels - ## - extraLabels: {} - - ## alertmanager Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - alertmanager.domain.com - # - domain.com/alertmanager - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## alertmanager Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - alertmanager.domain.com - - ## Alertmanager Deployment Strategy type - # strategy: - # type: Recreate - - ## Node tolerations for alertmanager scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for alertmanager pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, alertmanager will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## alertmanager data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## alertmanager data Persistent Volume Claim annotations - ## - annotations: {} - - ## alertmanager data Persistent Volume existing claim name - ## Requires alertmanager.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## alertmanager data Persistent Volume mount root path - ## - mountPath: /data - - ## alertmanager data Persistent Volume size - ## - size: 2Gi - - ## alertmanager data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## alertmanager data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. - ## - # volumeBindingMode: "" - - ## Subdirectory of alertmanager data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - ## Annotations to be added to alertmanager pods - ## - podAnnotations: {} - ## Tell prometheus to use a specific set of alertmanager pods - ## instead of all alertmanager pods found in the same namespace - ## Useful if you deploy multiple releases within the same namespace - ## - ## prometheus.io/probe: alertmanager-teamA - - ## Labels to be added to Prometheus AlertManager pods - ## - podLabels: {} - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - # enableMeshPeer : true - - servicePort: 80 - - ## alertmanager resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 32Mi - # requests: - # cpu: 10m - # memory: 32Mi - - ## Security context to be added to alertmanager pods - ## - securityContext: - runAsUser: 1001 - runAsNonRoot: true - runAsGroup: 1001 - fsGroup: 1001 - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - # enableMeshPeer : true - - ## List of IP addresses at which the alertmanager service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - # nodePort: 30000 - sessionAffinity: None - type: ClusterIP - -## Monitors ConfigMap changes and POSTs to a URL -configmapReload: - prometheus: - ## If false, the configmap-reload container will not be deployed - ## - enabled: true - - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: quay.io/prometheus-operator/prometheus-config-reloader - tag: v0.71.2 - pullPolicy: IfNotPresent - - ## Additional configmap-reload container arguments - ## - extraArgs: {} - ## Additional configmap-reload volume directories - ## - extraVolumeDirs: [] - - ## Additional configmap-reload mounts - ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d - # subPath: "" - # configMap: prometheus-alerts - # readOnly: true - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - - ## configmap-reload container securityContext - containerSecurityContext: {} - - alertmanager: - ## If false, the configmap-reload container will not be deployed - ## - enabled: false - - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: quay.io/prometheus-operator/prometheus-config-reloader - tag: v0.71.2 - pullPolicy: IfNotPresent - - ## Additional configmap-reload container arguments - ## - extraArgs: {} - ## Additional configmap-reload volume directories - ## - extraVolumeDirs: [] - - - ## Additional configmap-reload mounts - ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d - # subPath: "" - # configMap: prometheus-alerts - # readOnly: true - - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - -nodeExporter: - ## If false, node-exporter will not be installed - ## - enabled: true - - ## If true, node-exporter pods share the host network namespace - ## - hostNetwork: true - - ## If true, node-exporter pods share the host PID namespace - ## - hostPID: true - - ## node-exporter dns policy - ## - dnsPolicy: ClusterFirstWithHostNet - - ## node-exporter container name - ## - 
name: node-exporter - - ## node-exporter container image - ## - image: - repository: prom/node-exporter - tag: v1.7.0 - pullPolicy: IfNotPresent - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## node-exporter priorityClassName - ## - priorityClassName: "" - - ## Custom Update Strategy - ## - updateStrategy: - type: RollingUpdate - - ## Additional node-exporter container arguments - ## - extraArgs: {} - - ## Additional node-exporter hostPath mounts - ## - extraHostPathMounts: [] - # - name: textfile-dir - # mountPath: /srv/txt_collector - # hostPath: /var/lib/node-exporter - # readOnly: true - # mountPropagation: HostToContainer - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # configMap: certs-configmap - # readOnly: true - - ## Node tolerations for node-exporter scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for node-exporter pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to node-exporter pods - ## - podAnnotations: {} - - ## Labels to be added to node-exporter pods - ## - pod: - labels: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## node-exporter resource limits & requests - ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 200m - # memory: 50Mi - # requests: - # cpu: 100m - # memory: 30Mi - - ## Security context to be added to node-exporter pods - ## - securityContext: {} - # runAsUser: 0 - - service: - annotations: - prometheus.io/scrape: "true" - labels: {} - - # Exposed as a headless service: - # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services - clusterIP: None - - ## List of IP addresses at which the node-exporter service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - hostPort: 9100 - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9100 - type: ClusterIP - -server: - ## Prometheus server container name - ## - enabled: true - name: server - sidecarContainers: - strategy: - type: Recreate - rollingUpdate: null - - ## Prometheus server container image - ## - image: - repository: quay.io/prometheus/prometheus - tag: v2.49.1 - pullPolicy: IfNotPresent - - ## prometheus server priorityClassName - ## - priorityClassName: "" - - ## The URL prefix at which the container can be accessed. 
Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access alertmanager - ## Maybe same with Ingress host name - baseURL: "" - - ## Additional server container environment variables - ## - ## You specify this manually like you would a raw deployment manifest. - ## This means you can bind in environment variables from secrets. - ## - ## e.g. static environment variable: - ## - name: DEMO_GREETING - ## value: "Hello from the environment" - ## - ## e.g. secret environment variable: - ## - name: USERNAME - ## valueFrom: - ## secretKeyRef: - ## name: mysecret - ## key: username - env: [] - - extraFlags: - - web.enable-lifecycle - ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as - ## deleting time series. This is disabled by default. - # - web.enable-admin-api - ## - ## storage.tsdb.no-lockfile flag controls BD locking - # - storage.tsdb.no-lockfile - ## - ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) - # - storage.tsdb.wal-compression - - ## Path to a configuration file on prometheus server container FS - configPath: /etc/config/prometheus.yml - - global: - ## How frequently to scrape targets by default - ## - scrape_interval: 1m - ## How long until a scrape request times out - ## - scrape_timeout: 60s - ## How frequently to evaluate rules - ## - evaluation_interval: 1m - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write - ## - remoteWrite: {} - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read - ## - remoteRead: {} - - ## Additional Prometheus server container arguments - ## - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ## Additional Prometheus server Volume mounts - ## - extraVolumeMounts: [] - - ## Additional Prometheus server Volumes - ## - extraVolumes: [] - - ## Additional Prometheus server hostPath mounts - ## - extraHostPathMounts: [] - # - name: certs-dir - # mountPath: /etc/kubernetes/certs - # subPath: "" - # hostPath: /etc/kubernetes/certs - # readOnly: true - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # subPath: "" - # configMap: certs-configmap - # readOnly: true - - ## Additional Prometheus server Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: prom-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/server-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: false - - ## Prometheus server Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## Prometheus server Ingress additional labels - ## - extraLabels: {} - - ## Prometheus server Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - prometheus.domain.com - # - domain.com/prometheus - - ## PathType determines the interpretation of the Path matching - pathType: "Prefix" - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-server-tls - # hosts: - # - prometheus.domain.com - - ## Server Deployment Strategy type - # strategy: - # type: Recreate - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for Prometheus server pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, Prometheus server will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## Prometheus server data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## Prometheus server data Persistent Volume annotations - ## - annotations: {} - - ## Prometheus server data Persistent Volume existing claim name - ## Requires server.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## Prometheus server data Persistent Volume mount root path - ## - mountPath: /data - - ## Prometheus server data Persistent Volume size - ## - size: 8Gi - - ## Prometheus server data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## Prometheus server data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: <volumeBindingMode> - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. - ## - # volumeBindingMode: "" - - ## Subdirectory of Prometheus server data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - emptyDir: - sizeLimit: "" - - ## Annotations to be added to Prometheus server pods - ## - podAnnotations: {} - # iam.amazonaws.com/role: prometheus - - ## Labels to be added to Prometheus server pods - ## - podLabels: {} - - ## Prometheus AlertManager configuration - ## - alertmanagers: [] - - ## Specify if a Pod Security Policy for the Prometheus server must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. - ## This allows scaling replicas to more than 1 pod - ## - enabled: false - - annotations: {} - labels: {} - podManagementPolicy: OrderedReady - - ## Prometheus server headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - servicePort: 80 - - ## Prometheus server readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - readinessProbeInitialDelay: 30 - readinessProbeTimeout: 30 - readinessProbeFailureThreshold: 3 - readinessProbeSuccessThreshold: 1 - livenessProbeInitialDelay: 30 - livenessProbeTimeout: 30 - livenessProbeFailureThreshold: 3 - livenessProbeSuccessThreshold: 1 - - ## Prometheus server resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 500m - # memory: 512Mi - - ## Vertical Pod Autoscaler config - ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler - verticalAutoscaler: - ## If true, a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs) - enabled: false - # updateMode: "Auto" - # containerPolicies: - # - containerName: 'prometheus-server' - - ## Security context to be added to server pods - ## - securityContext: {} - # runAsUser: 1001 - # runAsNonRoot: true - # runAsGroup: 1001 - # fsGroup: 1001 - - containerSecurityContext: {} - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - sessionAffinity: None - type: ClusterIP - - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: - enabled: false - servicePort: 10901 - # nodePort: 10901 - - ## If using a statefulSet (statefulSet.enabled=true), configure the - ## service to connect to a specific replica to have a consistent view - ## of the data. - statefulsetReplica: - enabled: false - replica: 0 - - ## Prometheus server pod termination grace period - ## - terminationGracePeriodSeconds: 300 - - ## Prometheus data retention period (default if not specified is 15 days) - ## - retention: "15d" -
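For reference, a minimal sketch of the retention and persistence knobs described above, again nested under the bundled `prometheus.server` key and with placeholder sizes. Note that the `prometheusRetentionCheck` helper added later in this diff rejects a retention shorter than 3 days (or 50 hours), or one that does not exceed `kubecostModel.etlHourlyStoreDurationHours`:

  prometheus:
    server:
      retention: "15d"          # must be >= 3d (or >= 50h) and longer than the hourly ETL window
      persistentVolume:
        enabled: true
        size: 32Gi
        # storageClass: "-"     # "-" disables dynamic provisioning; omit to use the default class
      resources:
        requests:
          cpu: 500m
          memory: 2Gi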
-pushgateway: - ## If false, pushgateway will not be installed - ## - enabled: true - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## pushgateway container name - ## - name: pushgateway - - ## pushgateway container image - ## - image: - repository: prom/pushgateway - tag: v1.6.2 - pullPolicy: IfNotPresent - - ## pushgateway priorityClassName - ## - priorityClassName: "" - - ## Additional pushgateway container arguments - ## - ## for example: persistence.file: /data/pushgateway.data - extraArgs: {} - - ingress: - ## If true, pushgateway Ingress will be created - ## - enabled: false - - ## pushgateway Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## pushgateway Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - pushgateway.domain.com - # - domain.com/pushgateway - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation-based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## pushgateway Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - pushgateway.domain.com - - ## Node tolerations for pushgateway scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for pushgateway pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to pushgateway pods - ## - podAnnotations: {} - - ## Specify if a Pod Security Policy for pushgateway must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - replicaCount: 1 - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## pushgateway resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 32Mi
- # requests: - # cpu: 10m - # memory: 32Mi - - ## Security context to be added to pushgateway pods - ## - securityContext: - runAsUser: 1001 - runAsNonRoot: true - - service: - annotations: - prometheus.io/probe: pushgateway - labels: {} - clusterIP: "" - - ## List of IP addresses at which the pushgateway service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9091 - type: ClusterIP - - strategy: - type: Recreate - rollingUpdate: null - - - persistentVolume: - ## If true, pushgateway will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: false - - ## pushgateway data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## pushgateway data Persistent Volume Claim annotations - ## - annotations: {} - - ## pushgateway data Persistent Volume existing claim name - ## Requires pushgateway.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## pushgateway data Persistent Volume mount root path - ## - mountPath: /data - - ## pushgateway data Persistent Volume size - ## - size: 2Gi - - ## pushgateway data Persistent Volume Storage Class - ## If defined, storageClassName: <storageClass> - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## pushgateway data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: <volumeBindingMode> - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. - ## - # volumeBindingMode: "" - - ## Subdirectory of pushgateway data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - -
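For reference, a minimal sketch of the pushgateway settings documented above, nested under the chart's `prometheus.pushgateway` key (mirroring the server block); the persistence flag is the example value from the extraArgs comment above:

  prometheus:
    pushgateway:
      enabled: true
      extraArgs:
        persistence.file: /data/pushgateway.data
      persistentVolume:
        enabled: true
        size: 2Gi

With the service defaults above, clients push metrics to the gateway on port 9091 using the standard Pushgateway HTTP API (e.g. a POST or PUT to /metrics/job/<job_name>).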
-## alertmanager ConfigMap entries -## -alertmanagerFiles: - alertmanager.yml: - global: {} - # slack_api_url: '' - - receivers: - - name: default-receiver - # slack_configs: - # - channel: '@you' - # send_resolved: true - - route: - group_wait: 10s - group_interval: 5m - receiver: default-receiver - repeat_interval: 3h - -## Prometheus server ConfigMap entries -## -serverFiles: - - ## Alerts configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ - alerting_rules.yml: {} - # groups: - # - name: Instances - # rules: - # - alert: InstanceDown - # expr: up == 0 - # for: 5m - # labels: - # severity: page - # annotations: - # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' - # summary: 'Instance {{ $labels.instance }} down' - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml - alerts: {} - - ## Records configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ - recording_rules.yml: {} - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml - rules: {} - - prometheus.yml: - rule_files: - - /etc/config/recording_rules.yml - - /etc/config/alerting_rules.yml - ## The two files below are DEPRECATED and will be removed from this default values file - - /etc/config/rules - - /etc/config/alerts - - scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - # A scrape configuration for running Prometheus on a Kubernetes cluster. - # This uses separate scrape configs for cluster components (i.e. API server, node) - # and services to allow each to use different authentication configs. - # - # Kubernetes labels will be added as Prometheus labels on metrics via the - # `labelmap` relabeling action. - - - job_name: 'kubernetes-nodes-cadvisor' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # <kubernetes_sd_config>. - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below.
- # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - # This configuration will work only on kubelet 1.7.3+ - # As the scrape endpoints for cAdvisor have changed - # if you are using older version you need to change the replacement to - # replacement: /api/v1/nodes/$1:4194/proxy/metrics - # more info here https://github.com/coreos/prometheus-operator/issues/633 - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - - metric_relabel_configs: - - source_labels: [ __name__ ] - regex: (container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_network_receive_errors_total|container_network_transmit_errors_total|container_network_receive_packets_dropped_total|container_network_transmit_packets_dropped_total|container_memory_usage_bytes|container_cpu_cfs_throttled_periods_total|container_cpu_cfs_periods_total|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_periods_total|container_fs_inodes_free|container_fs_inodes_total|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_periods_total|container_cpu_cfs_periods_total|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_fs_inodes_free|container_fs_inodes_total|container_fs_usage_bytes|container_fs_limit_bytes|container_spec_cpu_shares|container_spec_memory_limit_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_fs_reads_bytes_total|container_network_receive_bytes_total|container_fs_writes_bytes_total|container_fs_reads_bytes_total|cadvisor_version_info|kubecost_pv_info) - action: keep - - source_labels: [ container ] - target_label: container_name - regex: (.+) - action: replace - - source_labels: [ pod ] - target_label: pod_name - regex: (.+) - action: replace - - # A scrape configuration for running Prometheus on a Kubernetes cluster. - # This uses separate scrape configs for cluster components (i.e. API server, node) - # and services to allow each to use different authentication configs. - # - # Kubernetes labels will be added as Prometheus labels on metrics via the - # `labelmap` relabeling action. - - - job_name: 'kubernetes-nodes' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. 
- # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics - - metric_relabel_configs: - - source_labels: [ __name__ ] - regex: (kubelet_volume_stats_used_bytes) # this metric is in alpha - action: keep - - # Scrape config for service endpoints. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - - job_name: 'kubernetes-service-endpoints' - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_endpoints_name] - action: keep - regex: (.*node-exporter|kubecost-network-costs) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: kubernetes_node - metric_relabel_configs: - - source_labels: [ __name__ ] - regex: 
(container_cpu_allocation|container_cpu_usage_seconds_total|container_fs_limit_bytes|container_fs_writes_bytes_total|container_gpu_allocation|container_memory_allocation_bytes|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|DCGM_FI_DEV_GPU_UTIL|deployment_match_labels|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_ready|kube_deployment_spec_replicas|kube_deployment_status_replicas|kube_deployment_status_replicas_available|kube_job_status_failed|kube_namespace_annotations|kube_namespace_labels|kube_node_info|kube_node_labels|kube_node_status_allocatable|kube_node_status_allocatable_cpu_cores|kube_node_status_allocatable_memory_bytes|kube_node_status_capacity|kube_node_status_capacity_cpu_cores|kube_node_status_capacity_memory_bytes|kube_node_status_condition|kube_persistentvolume_capacity_bytes|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_limits_cpu_cores|kube_pod_container_resource_limits_memory_bytes|kube_pod_container_resource_requests|kube_pod_container_resource_requests_cpu_cores|kube_pod_container_resource_requests_memory_bytes|kube_pod_container_status_restarts_total|kube_pod_container_status_running|kube_pod_container_status_terminated_reason|kube_pod_labels|kube_pod_owner|kube_pod_status_phase|kube_replicaset_owner|kube_statefulset_replicas|kube_statefulset_status_replicas|kubecost_cluster_info|kubecost_cluster_management_cost|kubecost_cluster_memory_working_set_bytes|kubecost_load_balancer_cost|kubecost_network_internet_egress_cost|kubecost_network_region_egress_cost|kubecost_network_zone_egress_cost|kubecost_node_is_spot|kubecost_pod_network_egress_bytes_total|node_cpu_hourly_cost|node_cpu_seconds_total|node_disk_reads_completed|node_disk_reads_completed_total|node_disk_writes_completed|node_disk_writes_completed_total|node_filesystem_device_error|node_gpu_count|node_gpu_hourly_cost|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_network_transmit_bytes_total|node_ram_hourly_cost|node_total_hourly_cost|pod_pvc_allocation|pv_hourly_cost|service_selector_labels|statefulSet_match_labels|kubecost_pv_info|up) - action: keep - -# adds additional scrape configs to prometheus.yml -# must be a string so you have to add a | after extraScrapeConfigs: -# example adds prometheus-blackbox-exporter scrape config -extraScrapeConfigs: - # - job_name: 'prometheus-blackbox-exporter' - # metrics_path: /probe - # params: - # module: [http_2xx] - # static_configs: - # - targets: - # - https://example.com - # relabel_configs: - # - source_labels: [__address__] - # target_label: __param_target - # - source_labels: [__param_target] - # target_label: instance - # - target_label: __address__ - # replacement: prometheus-blackbox-exporter:9115 - -# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager -# useful in H/A prometheus with different external labels but the same alerts -alertRelabelConfigs: - # alert_relabel_configs: - # - source_labels: [dc] - # regex: (.+)\d+ - # target_label: dc - -networkPolicy: - ## Enable creation of NetworkPolicy resources. 
- ## - enabled: false diff --git a/charts/kubecost/cost-analyzer/templates/NOTES.txt b/charts/kubecost/cost-analyzer/templates/NOTES.txt index 5e9aa2476..44761fadd 100644 --- a/charts/kubecost/cost-analyzer/templates/NOTES.txt +++ b/charts/kubecost/cost-analyzer/templates/NOTES.txt @@ -6,6 +6,7 @@ {{- include "gcpCloudIntegrationCheck" . -}} {{- include "azureCloudIntegrationCheck" . -}} {{- include "federatedStorageConfigSecretCheck" . -}} +{{- include "prometheusRetentionCheck" . -}} {{- $servicePort := .Values.service.port | default 9090 }} Kubecost {{ .Chart.Version }} has been successfully installed. diff --git a/charts/kubecost/cost-analyzer/templates/_helpers.tpl b/charts/kubecost/cost-analyzer/templates/_helpers.tpl index 217e15ec2..6874d658b 100644 --- a/charts/kubecost/cost-analyzer/templates/_helpers.tpl +++ b/charts/kubecost/cost-analyzer/templates/_helpers.tpl @@ -6,6 +6,8 @@ Set important variables before starting main templates {{- define "aggregator.deployMethod" -}} {{- if (.Values.federatedETL).primaryCluster }} {{- printf "statefulset" }} + {{- else if or ((.Values.federatedETL).agentOnly) (.Values.agent) (.Values.cloudAgent) }} + {{- printf "disabled" }} {{- else if (not .Values.kubecostAggregator) }} {{- printf "singlepod" }} {{- else if .Values.kubecostAggregator.enabled }} @@ -21,6 +23,14 @@ Set important variables before starting main templates {{- end }} {{- end }} +{{- define "frontend.deployMethod" -}} + {{- if eq .Values.kubecostFrontend.deployMethod "haMode" -}} + {{- printf "haMode" -}} + {{- else -}} + {{- printf "singlepod" -}} + {{- end -}} +{{- end -}} + {{/* Kubecost 2.0 preconditions */}} @@ -100,7 +110,9 @@ Kubecost 2.0 preconditions {{- if ((.Values.kubecostDeployment).statefulSet).enabled -}} {{- fail "\nIn Kubecost 2.0, kubecostDeployment does not support running as a statefulSet. Please reach out to support to discuss upgrade paths." -}} {{- end -}} - + {{- if and (eq (include "aggregator.deployMethod" .) "statefulset") (.Values.federatedETL).agentOnly }} + {{- fail "\nKubecost does not support running federatedETL.agentOnly with the aggregator statefulset" }} + {{- end }} {{- end -}} {{- define "cloudIntegrationFromProductConfigs" }} @@ -200,6 +212,38 @@ support templating a chart which uses the lookup function. {{- end -}} {{- end -}} +{{/* + Ensure that the Prometheus retention is not set too low +*/}} +{{- define "prometheusRetentionCheck" }} +{{- if ((.Values.prometheus).server).enabled }} + + {{- $retention := .Values.prometheus.server.retention }} + {{- $etlHourlyDurationHours := (int .Values.kubecostModel.etlHourlyStoreDurationHours) }} + + {{- if (hasSuffix "d" $retention) }} + {{- $retentionDays := (int (trimSuffix "d" $retention)) }} + {{- if lt $retentionDays 3 }} + {{- fail (printf "With a daily resolution, Prometheus retention must be set >= 3 days. Provided retention is %s" $retention) }} + {{- else if le (mul $retentionDays 24) $etlHourlyDurationHours }} + {{- fail (printf "Prometheus retention (%s) must be greater than .Values.kubecostModel.etlHourlyStoreDurationHours (%d)" $retention $etlHourlyDurationHours) }} + {{- end }} + + {{- else if (hasSuffix "h" $retention) }} + {{- $retentionHours := (int (trimSuffix "h" $retention)) }} + {{- if lt $retentionHours 50 }} + {{- fail (printf "With an hourly resolution, Prometheus retention must be set >= 50 hours. 
Provided retention is %s" $retention) }} + {{- else if le $retentionHours $etlHourlyDurationHours }} + {{- fail (printf "Prometheus retention (%s) must be greater than .Values.kubecostModel.etlHourlyStoreDurationHours (%d)" $retention $etlHourlyDurationHours) }} + {{- end }} + + {{- else }} + {{- fail "prometheus.server.retention must be set in days (e.g. 5d) or hours (e.g. 97h)"}} + + {{- end }} +{{- end }} +{{- end }} + {{/* Expand the name of the chart. */}} @@ -218,6 +262,9 @@ Expand the name of the chart. {{- define "forecasting.name" -}} {{- default "forecasting" | trunc 63 | trimSuffix "-" -}} {{- end -}} +{{- define "frontend.name" -}} +{{- default "frontend" | trunc 63 | trimSuffix "-" -}} +{{- end -}} {{/* Create a default fully qualified app name. @@ -259,6 +306,9 @@ If release name contains chart name it will be used as a full name. {{- define "forecasting.fullname" -}} {{- printf "%s-%s" .Release.Name (include "forecasting.name" .) | trunc 63 | trimSuffix "-" -}} {{- end -}} +{{- define "frontend.fullname" -}} +{{- printf "%s-%s" .Release.Name (include "frontend.name" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} {{/* Create the fully qualified name for Prometheus server service. @@ -311,6 +361,10 @@ Create the fully qualified name for Prometheus alertmanager service. {{- end -}} {{- end -}} +{{- define "frontend.serviceName" -}} +{{ include "frontend.fullname" . }} +{{- end -}} + {{- define "diagnostics.serviceName" -}} {{- printf "%s-%s" .Release.Name "diagnostics" | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -454,9 +508,6 @@ app.kubernetes.io/instance: {{ .Release.Name }} app: diagnostics {{- end }} -{{/* -{{- end -}} - {{/* Create the selector labels. */}} @@ -466,6 +517,15 @@ app.kubernetes.io/instance: {{ .Release.Name }} app: cost-analyzer {{- end -}} +{{/* +Create the selector labels for haMode frontend. +*/}} +{{- define "frontend.selectorLabels" -}} +app.kubernetes.io/name: {{ include "frontend.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app: cost-analyzer +{{- end -}} + {{- define "aggregator.selectorLabels" -}} {{- if eq (include "aggregator.deployMethod" .) "statefulset" }} app.kubernetes.io/name: {{ include "aggregator.name" . }} @@ -795,10 +855,10 @@ If release name contains chart name it will be used as a full name. Create the name of the service account */}} {{- define "grafana.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- if .Values.grafana.serviceAccount.create -}} + {{ default (include "grafana.fullname" .) .Values.grafana.serviceAccount.name }} {{- else -}} - {{ default "default" .Values.serviceAccount.name }} + {{ default "default" .Values.grafana.serviceAccount.name }} {{- end -}} {{- end -}} @@ -880,6 +940,10 @@ Begin Kubecost 2.0 templates # of the init container that gives everything under /var/configs 777. mountPath: /var/configs/waterfowl {{- end }} + {{- if and ((.Values.kubecostProductConfigs).productKey).enabled ((.Values.kubecostProductConfigs).productKey).secretname (eq (include "aggregator.deployMethod" .) 
"statefulset") }} + - name: productkey-secret + mountPath: /var/configs/productkey + {{- end }} {{- if .Values.saml }} {{- if .Values.saml.enabled }} {{- if .Values.saml.secretName }} @@ -910,7 +974,7 @@ Begin Kubecost 2.0 templates {{- if .Values.oidc.enabled }} - name: oidc-config mountPath: /var/configs/oidc - {{- if .Values.oidc.secretName }} + {{- if or .Values.oidc.existingCustomSecret.name .Values.oidc.secretName }} - name: oidc-client-secret mountPath: /var/configs/oidc-client-secret {{- end }} @@ -928,6 +992,10 @@ Begin Kubecost 2.0 templates name: {{ .Values.prometheus.server.clusterIDConfigmap }} key: CLUSTER_ID {{- end }} + {{- if and ((.Values.kubecostProductConfigs).productKey).mountPath (eq (include "aggregator.deployMethod" .) "statefulset") }} + - name: PRODUCT_KEY_MOUNT_PATH + value: {{ .Values.kubecostProductConfigs.productKey.mountPath }} + {{- end }} {{- if (gt (int .Values.kubecostAggregator.numDBCopyPartitions) 0) }} - name: NUM_DB_COPY_CHUNKS value: {{ .Values.kubecostAggregator.numDBCopyPartitions | quote }} @@ -960,6 +1028,12 @@ Begin Kubecost 2.0 templates - name: no_proxy value: {{ .Values.systemProxy.noProxy }} {{- end }} + {{- if ((.Values.kubecostProductConfigs).carbonEstimates) }} + - name: CARBON_ESTIMATES_ENABLED + value: "true" + {{- end }} + - name: CUSTOM_COST_ENABLED + value: {{ .Values.kubecostModel.plugins.enabled | quote }} {{- if .Values.kubecostAggregator.extraEnv -}} {{- toYaml .Values.kubecostAggregator.extraEnv | nindent 4 }} {{- end }} @@ -1043,6 +1117,15 @@ Begin Kubecost 2.0 templates {{- define "aggregator.jaeger.sidecarContainerTemplate" }} - name: embedded-jaeger + env: + - name: SPAN_STORAGE_TYPE + value: badger + - name: BADGER_EPHEMERAL + value: "true" + - name: BADGER_DIRECTORY_VALUE + value: /tmp/badger/data + - name: BADGER_DIRECTORY_KEY + value: /tmp/badger/key securityContext: {{- toYaml .Values.kubecostAggregator.jaeger.containerSecurityContext | nindent 4 }} image: {{ .Values.kubecostAggregator.jaeger.image }}:{{ .Values.kubecostAggregator.jaeger.imageVersion }} @@ -1083,6 +1166,10 @@ Begin Kubecost 2.0 templates protocol: TCP resources: {{- toYaml .Values.kubecostAggregator.cloudCost.resources | nindent 4 }} + securityContext: + {{- if .Values.global.containerSecurityContext }} + {{- toYaml .Values.global.containerSecurityContext | nindent 4 }} + {{- end }} volumeMounts: - name: persistent-configs mountPath: /var/configs @@ -1099,6 +1186,18 @@ Begin Kubecost 2.0 templates - name: cloud-integration mountPath: /var/configs/cloud-integration {{- end }} + {{- if .Values.kubecostModel.plugins.enabled }} + - mountPath: {{ .Values.kubecostModel.plugins.folder }} + name: plugins-dir + readOnly: false + - name: tmp + mountPath: /tmp + {{- range $key := .Values.kubecostModel.plugins.enabledPlugins }} + - mountPath: {{ $.Values.kubecostModel.plugins.folder }}/config + name: plugins-config + readOnly: true + {{- end }} + {{- end }} env: - name: CONFIG_PATH value: /var/configs/ @@ -1118,6 +1217,8 @@ Begin Kubecost 2.0 templates value: {{ .Values.kubecostAggregator.cloudCost.queryWindowDays | default 7 | quote }} - name: CLOUD_COST_RUN_WINDOW_DAYS value: {{ .Values.kubecostAggregator.cloudCost.runWindowDays | default 3 | quote }} + - name: CUSTOM_COST_ENABLED + value: {{ .Values.kubecostModel.plugins.enabled | quote }} {{- with .Values.kubecostModel.cloudCost }} {{- with .labelList }} - name: CLOUD_COST_IS_INCLUDE_LIST @@ -1170,6 +1271,17 @@ Backups configured flag for nginx configmap {{- end -}} {{- end -}} +{{/* +costEventsAuditEnabled 
flag for nginx configmap +*/}} +{{- define "costEventsAuditEnabled" -}} + {{- if or (.Values.costEventsAudit).enabled -}} + {{- printf "true" -}} + {{- else -}} + {{- printf "false" -}} + {{- end -}} +{{- end -}} + {{- define "cost-analyzer.grafanaEnabled" -}} {{- if and (.Values.global.grafana.enabled) (not .Values.federatedETL.agentOnly) -}} {{- printf "true" -}} @@ -1232,3 +1344,19 @@ for more information {{- fail (include "azureCloudIntegrationJSON" .) }} {{- end }} {{- end }} + +{{- define "clusterControllerEnabled" }} +{{- if (.Values.clusterController).enabled }} +{{- printf "true" -}} +{{- else -}} +{{- printf "false" -}} +{{- end -}} +{{- end -}} + +{{- define "pluginsEnabled" }} +{{- if ((.Values.kubecostModel.plugins).install).enabled}} +{{- printf "true" -}} +{{- else -}} +{{- printf "false" -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-deployment.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-deployment.yaml index 9b6764967..5c033ddc8 100644 --- a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-deployment.yaml +++ b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-deployment.yaml @@ -2,9 +2,13 @@ {{/* A cloud integration secret is required for cloud cost to function as a dedicated pod. + UI based configuration is not supported for cloud cost with aggregator. */}} -{{- if or (.Values.kubecostProductConfigs).cloudIntegrationSecret (.Values.kubecostProductConfigs).cloudIntegrationJSON ((.Values.kubecostProductConfigs).athenaBucketName) }} - +{{- if ((.Values.kubecostAggregator).cloudCost).enabled }} +{{- if not ( or (.Values.kubecostProductConfigs).cloudIntegrationSecret (.Values.kubecostProductConfigs).cloudIntegrationJSON ((.Values.kubecostProductConfigs).athenaBucketName)) }} +{{- fail "\n\nA cloud-integration secret is required when using the aggregator statefulset and cloudCost is enabled." }} +{{- end }} +{{- end }} apiVersion: apps/v1 kind: Deployment metadata: @@ -25,6 +29,10 @@ spec: template: metadata: labels: + {{/* + Force pod restarts on upgrades to ensure the nginx config is current + */}} + helm-rollout-restarter: {{ randAlphaNum 5 | quote }} app.kubernetes.io/name: cloud-cost app.kubernetes.io/instance: {{ .Release.Name }} app: cloud-cost @@ -51,14 +59,14 @@ spec: defaultMode: 420 secretName: {{ .Values.kubecostModel.federatedStorageConfigSecret }} {{- end }} - {{- if .Values.kubecostProductConfigs.cloudIntegrationSecret }} + {{- if (.Values.kubecostProductConfigs).cloudIntegrationSecret }} - name: cloud-integration secret: secretName: {{ .Values.kubecostProductConfigs.cloudIntegrationSecret }} items: - key: cloud-integration.json path: cloud-integration.json - {{- else if or .Values.kubecostProductConfigs.cloudIntegrationJSON ((.Values.kubecostProductConfigs).athenaProjectID) }} + {{- else if or (.Values.kubecostProductConfigs).cloudIntegrationJSON ((.Values.kubecostProductConfigs).athenaProjectID) }} - name: cloud-integration secret: secretName: cloud-integration @@ -66,10 +74,42 @@ spec: - key: cloud-integration.json path: cloud-integration.json {{- end }} - {{/* Titled persistent-configs to be compatible with single-pod install. - All data stored here is ephemeral, and does not require a PV. */}} + {{/* Despite the name, this is not persistent-configs. + The name is for compatibility with single-pod install. + All data stored here is ephemeral, and does not require persistence. 
*/}} - name: persistent-configs emptyDir: {} + {{- if .Values.kubecostModel.plugins.enabled }} + {{- if .Values.kubecostModel.plugins.install.enabled}} + - name: install-script + configMap: + name: {{ template "cost-analyzer.fullname" . }}-install-plugins + {{- end }} + - name: plugins-dir + emptyDir: {} + - name: plugins-config + secret: + secretName: {{ .Values.kubecostModel.plugins.configSecret }} + items: + - key: datadog_config.json + path: datadog_config.json + - name: tmp + emptyDir: {} + {{- end }} + initContainers: + {{- if (and .Values.kubecostModel.plugins.enabled .Values.kubecostModel.plugins.install.enabled )}} + - name: plugin-installer + image: {{ .Values.kubecostModel.plugins.install.fullImageName }} + command: ["sh", "/install/install_plugins.sh"] + {{- with .Values.kubecostModel.plugins.install.securityContext }} + securityContext: {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: install-script + mountPath: /install + - name: plugins-dir + mountPath: {{ .Values.kubecostModel.plugins.folder }} + {{- end }} containers: {{- include "aggregator.cloudCost.containerTemplate" . | nindent 8 }} {{- if .Values.imagePullSecrets }} @@ -97,5 +137,4 @@ spec: affinity: {{- toYaml . | nindent 8 }} {{- end }} -{{- end }} {{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service-account.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service-account.yaml index 3cfc37243..c8018f77b 100644 --- a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service-account.yaml +++ b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service-account.yaml @@ -1,4 +1,11 @@ -{{- if .Values.kubecostAggregator.cloudCost.enabled }} +{{- if eq (include "aggregator.deployMethod" .) "statefulset" }} + +{{/* + A cloud integration secret is required for cloud cost to function as a dedicated pod. + UI based configuration is not supported for cloud cost with aggregator. +*/}} + +{{- if or (.Values.kubecostProductConfigs).cloudIntegrationSecret (.Values.kubecostProductConfigs).cloudIntegrationJSON ((.Values.kubecostProductConfigs).athenaBucketName) }} {{- if and .Values.serviceAccount.create .Values.kubecostAggregator.cloudCost.serviceAccountName }} apiVersion: v1 kind: ServiceAccount @@ -13,3 +20,4 @@ metadata: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service.yaml index 96a05b511..bef9bfdc5 100644 --- a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service.yaml +++ b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service.yaml @@ -1,6 +1,4 @@ -{{- if and (not .Values.agent) (not .Values.cloudAgent) }} -{{- if not (eq .Values.kubecostAggregator.deployMethod "disabled") }} - +{{- if not (eq (include "aggregator.deployMethod" .) 
"disabled") -}} kind: Service apiVersion: v1 metadata: @@ -16,5 +14,4 @@ spec: - name: tcp-api port: 9005 targetPort: 9005 -{{- end }} {{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-service.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-service.yaml index 7e487aff1..40f6729de 100644 --- a/charts/kubecost/cost-analyzer/templates/aggregator-service.yaml +++ b/charts/kubecost/cost-analyzer/templates/aggregator-service.yaml @@ -1,6 +1,4 @@ -{{- if and (not .Values.agent) (not .Values.cloudAgent) }} -{{- if not (eq .Values.kubecostAggregator.deployMethod "disabled") }} - +{{- if not (eq (include "aggregator.deployMethod" .) "disabled") -}} kind: Service apiVersion: v1 metadata: @@ -24,6 +22,4 @@ spec: {{- with .Values.kubecostAggregator.extraPorts }} {{- toYaml . | nindent 4 }} {{- end }} - -{{- end }} {{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-statefulset.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-statefulset.yaml index 6293e73fd..f79f1abc8 100644 --- a/charts/kubecost/cost-analyzer/templates/aggregator-statefulset.yaml +++ b/charts/kubecost/cost-analyzer/templates/aggregator-statefulset.yaml @@ -44,6 +44,8 @@ spec: labels: app.kubernetes.io/name: aggregator app.kubernetes.io/instance: {{ .Release.Name }} + {{/* Force pod restarts on upgrades to ensure the nginx config is current */}} + helm-rollout-restarter: {{ randAlphaNum 5 | quote }} app: aggregator {{- with .Values.global.additionalLabels }} {{- toYaml . | nindent 8 }} @@ -85,7 +87,15 @@ spec: defaultMode: 420 secretName: {{ $etlBackupBucketSecret }} {{- else }} - {{- fail "Kubecost Aggregator Enterprise Config requires .Values.kubecostModel.federatedStorageConfigSecret" }} + {{- fail "\n\nKubecost Aggregator Enterprise Config requires .Values.kubecostModel.federatedStorageConfigSecret" }} + {{- end }} + {{- if and ((.Values.kubecostProductConfigs).productKey).enabled ((.Values.kubecostProductConfigs).productKey).secretname }} + - name: productkey-secret + secret: + secretName: {{ .Values.kubecostProductConfigs.productKey.secretname }} + items: + - key: productkey.json + path: productkey.json {{- end }} {{- if .Values.saml }} {{- if .Values.saml.enabled }} diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml index 457561db6..5ffa8b588 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml @@ -37,6 +37,8 @@ spec: metadata: labels: {{- include "cost-analyzer.selectorLabels" . | nindent 8 }} + {{/* Force pod restarts on upgrades to ensure the nginx config is current */}} + helm-rollout-restarter: {{ randAlphaNum 5 | quote }} {{- if .Values.global.additionalLabels }} {{ toYaml .Values.global.additionalLabels | nindent 8 }} {{- end }} @@ -63,6 +65,8 @@ spec: restartPolicy: Always serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . }} volumes: + - name: plugins-dir + emptyDir: {} {{- if .Values.global.gcpstore.enabled }} - name: ubbagent-config configMap: @@ -76,7 +80,7 @@ spec: {{- end }} - name: tmp emptyDir: {} - {{- if and .Values.kubecostFrontend.enabled (not .Values.federatedETL.agentOnly) }} + {{- if and .Values.kubecostFrontend.enabled (not .Values.federatedETL.agentOnly) (not (eq (include "frontend.deployMethod" .) 
"haMode")) }} - name: nginx-conf configMap: name: nginx-conf @@ -110,8 +114,7 @@ spec: secretName: {{ .Values.kubecostModel.federatedStorageConfigSecret }} {{- end }} {{- if .Values.kubecostProductConfigs }} - {{- if .Values.kubecostProductConfigs.productKey }} - {{- if and .Values.kubecostProductConfigs.productKey.enabled .Values.kubecostProductConfigs.productKey.secretname }} + {{- if and ((.Values.kubecostProductConfigs).productKey).enabled ((.Values.kubecostProductConfigs).productKey).secretname }} - name: productkey-secret secret: secretName: {{ .Values.kubecostProductConfigs.productKey.secretname }} @@ -119,7 +122,6 @@ spec: - key: productkey.json path: productkey.json {{- end }} - {{- end -}} {{- if .Values.kubecostProductConfigs }} {{- if .Values.kubecostProductConfigs.gcpSecretName }} - name: gcp-key-secret @@ -294,8 +296,8 @@ spec: claimName: {{ template "cost-analyzer.fullname" . }}-db {{- end }} {{- end }} -{{- if .Values.supportNFS }} initContainers: + {{- if .Values.supportNFS }} - name: config-db-perms-fix {{- if .Values.initChownDataImage }} image: {{ .Values.initChownDataImage }} @@ -546,12 +548,10 @@ spec: {{- end }} {{- end }} {{- if .Values.kubecostProductConfigs }} - {{- if .Values.kubecostProductConfigs.productKey }} - {{- if .Values.kubecostProductConfigs.productKey.secretname }} + {{- if and ((.Values.kubecostProductConfigs).productKey).enabled ((.Values.kubecostProductConfigs).productKey).secretname }} - name: productkey-secret mountPath: /var/configs/productkey {{- end }} - {{- end }} {{- if .Values.kubecostProductConfigs.gcpSecretName }} - name: gcp-key-secret mountPath: /var/secrets @@ -611,7 +611,7 @@ spec: {{- if .Values.oidc.enabled }} - name: oidc-config mountPath: /var/configs/oidc - {{- if .Values.oidc.secretName }} + {{- if or .Values.oidc.existingCustomSecret.name .Values.oidc.secretName }} - name: oidc-client-secret mountPath: /var/configs/oidc-client-secret {{- end }} @@ -699,12 +699,10 @@ spec: value: production {{- end }} {{- if .Values.kubecostProductConfigs }} - {{- if .Values.kubecostProductConfigs.productKey }} - {{- if .Values.kubecostProductConfigs.productKey.mountPath }} + {{- if ((.Values.kubecostProductConfigs).productKey).mountPath }} - name: PRODUCT_KEY_MOUNT_PATH value: {{ .Values.kubecostProductConfigs.productKey.mountPath }} {{- end }} - {{- end }} {{- if .Values.kubecostProductConfigs.ingestPodUID }} - name: INGEST_POD_UID value: {{ (quote .Values.kubecostProductConfigs.ingestPodUID) }} @@ -1007,8 +1005,11 @@ spec: key: kubecost-token - name: WATERFOWL_ENABLED value: "true" + {{- if not (.Values.diagnostics.enabled) }} + - name: DIAGNOSTICS_RUN_IN_COST_MODEL + value: "false" {{- /*A pre-requisite for running MultiClusterDiagnostics in the cost-model container is a configured federated-store secret and cluster_id*/}} - {{- if or (empty .Values.kubecostModel.federatedStorageConfigSecret) (eq .Values.prometheus.server.global.external_labels.cluster_id "cluster-one") }} + {{- else if or (empty .Values.kubecostModel.federatedStorageConfigSecret) (eq .Values.prometheus.server.global.external_labels.cluster_id "cluster-one") }} - name: DIAGNOSTICS_RUN_IN_COST_MODEL value: "false" {{- else if .Values.diagnostics.deployment.enabled }} @@ -1034,7 +1035,7 @@ spec: - name: DIAGNOSTICS_COLLECT_HELM_VALUES value: {{ quote .Values.diagnostics.collectHelmValues }} {{- end }} - {{- if and .Values.kubecostFrontend.enabled (not .Values.federatedETL.agentOnly) }} + {{- if and .Values.kubecostFrontend.enabled (not .Values.federatedETL.agentOnly) (not (eq 
(include "frontend.deployMethod" .) "haMode")) }} {{- if .Values.kubecostFrontend }} {{- if .Values.kubecostFrontend.fullImageName }} - image: {{ .Values.kubecostFrontend.fullImageName }} diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml index dc2cf8bd5..dcee75588 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml @@ -165,7 +165,7 @@ data: add_header Cache-Control "must-revalidate"; - {{- if.Values.kubecostFrontend.extraServerConfig }} + {{- if .Values.kubecostFrontend.extraServerConfig }} {{- .Values.kubecostFrontend.extraServerConfig | toString | nindent 8 -}} {{- else }} large_client_header_buffers 4 32k; @@ -188,6 +188,10 @@ data: error_page 401 = /login; try_files $uri $uri/ /index.html; } + location /healthz { + add_header 'Content-Type' 'text/plain'; + return 200 "healthy\n"; + } {{- else }} add_header Cache-Control "max-age=300"; location / { @@ -280,6 +284,8 @@ data: return 404; {{- end }} } +{{- if and (or .Values.saml.enabled .Values.oidc.enabled) (not (eq (include "aggregator.deployMethod" .) "disabled")) }} + {{- if .Values.oidc.enabled }} location /oidc/ { proxy_connect_timeout 180; proxy_send_timeout 180; @@ -291,6 +297,8 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + {{- end }} + {{- if .Values.saml.enabled }} location /saml/ { proxy_connect_timeout 180; proxy_send_timeout 180; @@ -302,6 +310,8 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + {{- end }} + {{- if or .Values.saml.enabled .Values.oidc.enabled}} location /login { proxy_connect_timeout 180; proxy_send_timeout 180; @@ -326,7 +336,8 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - + {{- end }} +{{- end }} {{- if .Values.global.grafana.proxy }} location /grafana/ { {{- if .Values.saml.enabled }} @@ -417,6 +428,14 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + location = /model/allocation/carbon { + proxy_read_timeout 300; + proxy_pass http://aggregator/allocation/carbon; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } location = /model/assets { proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; proxy_pass http://aggregator/assets; @@ -458,6 +477,14 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + location = /model/assets/carbon { + proxy_read_timeout 300; + proxy_pass http://aggregator/assets/carbon; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } location = /model/savings/requestSizingV2 { proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; proxy_pass http://aggregator/savings/requestSizingV2; @@ -474,6 +501,14 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + location = /model/savings/clusterSizingETL { + proxy_read_timeout {{ 
.Values.kubecostFrontend.timeoutSeconds | default 300 }}; + proxy_pass http://aggregator/savings/clusterSizingETL; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } location = /model/cloudCost { proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; proxy_pass http://aggregator/cloudCost; @@ -586,16 +621,6 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - {{- if eq (default .Values.kubecostAggregator.env.MEMORY_INTENSIVE_CLUSTER_SIZING "disabled") "enabled" }} - location = /model/savings/clusterSizingETL { - proxy_read_timeout 600; - proxy_pass http://aggregator/savings/clusterSizingETL; - proxy_redirect off; - proxy_set_header Connection ""; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - {{- end }} location = /model/reports/allocation { proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; proxy_pass http://aggregator/reports/allocation; @@ -943,6 +968,30 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + location = /model/enablements { + proxy_read_timeout 300; + proxy_pass http://aggregator/enablements; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/customCost/total { + proxy_read_timeout 300; + proxy_pass http://aggregator/customCost/total; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/customCost/timeseries { + proxy_read_timeout 300; + proxy_pass http://aggregator/customCost/timeseries; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } #Cloud Cost Endpoints location = /model/cloudCost/status { @@ -969,25 +1018,57 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - location = /model/cloudCost/integration/export { + location = /model/cloud/config { proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; - proxy_pass http://cloudCost/cloudCost/integration/export; + proxy_pass http://cloudCost/cloud/config; proxy_redirect off; proxy_set_header Connection ""; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - location = /model/cloudCost/integration/enable { + location = /model/cloud/config/export { proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; - proxy_pass http://cloudCost/cloudCost/integration/enable; + proxy_pass http://cloudCost/cloud/config/export; proxy_redirect off; proxy_set_header Connection ""; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - location = /model/cloudCost/integration/disable { + location = /model/cloud/config/enable { proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; - proxy_pass http://cloudCost/cloudCost/integration/disable; + proxy_pass http://cloudCost/cloud/config/enable; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP 
$remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloud/config/disable { + proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; + proxy_pass http://cloudCost/cloud/config/disable; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/integration/validate { + proxy_read_timeout {{ .Values.kubecostFrontend.timeoutSeconds | default 300 }}; + proxy_pass http://cloudCost/cloudCost/integration/validate; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/customCost/status { + proxy_read_timeout 300; + proxy_pass http://cloudCost/customCost/status; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/customCost/rebuild { + proxy_read_timeout 300; + proxy_pass http://cloudCost/customCost/rebuild; proxy_redirect off; proxy_set_header Connection ""; proxy_set_header X-Real-IP $remote_addr; @@ -1099,7 +1180,11 @@ data: return 200 '\n { "ssoConfigured": "{{ template "ssoEnabled" . }}", - "dataBackupConfigured": "{{ template "dataBackupConfigured" . }}" + "dataBackupConfigured": "{{ template "dataBackupConfigured" . }}", + "costEventsAuditEnabled": "{{ template "costEventsAuditEnabled" . }}", + "frontendDeployMethod": "{{ template "frontend.deployMethod" . }}", + "pluginsEnabled": "{{ template "pluginsEnabled" . }}", + "clusterControllerEnabled": "{{ template "clusterControllerEnabled" . }}" } '; } diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-ingress-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-ingress-template.yaml index 03fb95bd4..4ac0693dd 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-ingress-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-ingress-template.yaml @@ -1,7 +1,12 @@ {{- if .Values.ingress -}} {{- if .Values.ingress.enabled -}} {{- $fullName := include "cost-analyzer.fullname" . -}} -{{- $serviceName := include "cost-analyzer.serviceName" . -}} +{{- $serviceName := "" -}} +{{- if eq (include "frontend.deployMethod" .) "haMode" }} +{{- $serviceName = include "frontend.serviceName" . }} +{{- else }} +{{- $serviceName = include "cost-analyzer.serviceName" . 
-}} +{{- end }} {{- $ingressPaths := .Values.ingress.paths -}} {{- $ingressPathType := .Values.ingress.pathType -}} apiVersion: networking.k8s.io/v1 diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-network-costs-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-network-costs-template.yaml index 0dad4bc8c..7af788153 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-network-costs-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-network-costs-template.yaml @@ -69,7 +69,7 @@ spec: - name: TRAFFIC_LOGGING_ENABLED value: {{ (quote .Values.networkCosts.trafficLogging) | default (quote true) }} - name: LOG_LEVEL - value: info + value: {{ .Values.networkCosts.logLevel | default "info" }} {{- if .Values.networkCosts.softMemoryLimit }} - name: GOMEMLIMIT value: {{ .Values.networkCosts.softMemoryLimit }} diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-service-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-service-template.yaml index dd2121eae..82d957fca 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-service-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-service-template.yaml @@ -24,6 +24,12 @@ spec: {{- end }} {{- else }} type: ClusterIP +{{- end }} +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} {{- end }} ports: - name: tcp-model @@ -32,7 +38,7 @@ spec: {{- with .Values.kubecostModel.extraPorts }} {{- toYaml . | nindent 4 }} {{- end }} - {{- if .Values.kubecostFrontend.enabled }} + {{- if and (.Values.kubecostFrontend.enabled) (not (eq (include "frontend.deployMethod" .) "haMode")) }} - name: tcp-frontend {{- if (eq .Values.service.type "NodePort") }} {{- if .Values.service.nodePort }} diff --git a/charts/kubecost/cost-analyzer/templates/forecasting-deployment.yaml b/charts/kubecost/cost-analyzer/templates/forecasting-deployment.yaml index 3284a67a5..dec8e6316 100644 --- a/charts/kubecost/cost-analyzer/templates/forecasting-deployment.yaml +++ b/charts/kubecost/cost-analyzer/templates/forecasting-deployment.yaml @@ -67,8 +67,13 @@ spec: env: - name: CONFIG_PATH value: /var/configs/ + {{- if or .Values.saml.enabled .Values.oidc.enabled }} - name: KCM_BASE_URL - value: http://{{ template "cost-analyzer.serviceName" . }}:9090/model + value: http://{{ template "aggregator.serviceName" . }}:9008 + {{- else }} + - name: KCM_BASE_URL + value: http://{{ template "aggregator.serviceName" . }}:9004 + {{- end }} - name: MODEL_STORAGE_PATH value: "/tmp/localrun/models" - name: PAGE_ITEM_LIMIT diff --git a/charts/kubecost/cost-analyzer/templates/frontend-deployment-template.yaml b/charts/kubecost/cost-analyzer/templates/frontend-deployment-template.yaml new file mode 100644 index 000000000..17f52f369 --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/frontend-deployment-template.yaml @@ -0,0 +1,213 @@ +{{- if eq (include "frontend.deployMethod" .) "haMode" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "frontend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cost-analyzer.commonLabels" . 
| nindent 4 }} + {{- if and .Values.kubecostDeployment .Values.kubecostDeployment.labels }} + {{- toYaml .Values.kubecostDeployment.labels | nindent 4 }} + {{- end }} + {{- if and .Values.kubecostDeployment .Values.kubecostDeployment.annotations }} + annotations: + {{- toYaml .Values.kubecostDeployment.annotations | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.kubecostFrontend.haReplicas | default 2 }} + selector: + matchLabels: + {{- include "frontend.selectorLabels" . | nindent 6 }} + {{- if .Values.kubecostFrontend.deploymentStrategy }} + {{- with .Values.kubecostFrontend.deploymentStrategy }} + strategy: {{ toYaml . | nindent 4 }} + {{- end }} + {{- else }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + {{- end }} + template: + metadata: + labels: + {{/* + Force pod restarts on upgrades to ensure the nginx config is current + */}} + helm-rollout-restarter: {{ randAlphaNum 5 | quote }} + {{- include "frontend.selectorLabels" . | nindent 8 }} + {{- if .Values.global.additionalLabels }} + {{- toYaml .Values.global.additionalLabels | nindent 8 }} + {{- end }} + {{- if and .Values.kubecostDeployment .Values.kubecostDeployment.labels }} + {{- toYaml .Values.kubecostDeployment.labels | nindent 8 }} + {{- end }} + {{- with .Values.global.podAnnotations}} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.global.platforms.openshift.enabled }} + securityContext: + {{- toYaml .Values.global.platforms.openshift.securityContext | nindent 8 }} + {{- else if .Values.global.securityContext }} + securityContext: + {{- toYaml .Values.global.securityContext | nindent 8 }} + {{- else }} + securityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + {{- end }} + restartPolicy: Always + serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . 
}} + volumes: + - name: tmp + emptyDir: {} + - name: nginx-conf + configMap: + name: nginx-conf + items: + - key: nginx.conf + path: default.conf + {{- if .Values.global.containerSecuritycontext }} + - name: var-run + emptyDir: {} + - name: cache + emptyDir: {} + {{- end }} + {{- if .Values.kubecostFrontend.tls }} + {{- if .Values.kubecostFrontend.tls.enabled }} + - name: tls + secret: + secretName : {{ .Values.kubecostFrontend.tls.secretName }} + items: + - key: tls.crt + path: kc.crt + - key: tls.key + path: kc.key + {{- end }} + {{- end }} + {{- if .Values.kubecostAdmissionController }} + {{- if .Values.kubecostAdmissionController.enabled }} + {{- if .Values.kubecostAdmissionController.secretName }} + - name: webhook-server-tls + secret: + secretName: {{ .Values.kubecostAdmissionController.secretName }} + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + {{- end }} + {{- end }} + {{- end }} + containers: + {{- if .Values.kubecostFrontend }} + {{- if .Values.kubecostFrontend.fullImageName }} + - image: {{ .Values.kubecostFrontend.fullImageName }} + {{- else if .Values.imageVersion }} + - image: {{ .Values.kubecostFrontend.image }}:{{ .Values.imageVersion }} + {{- else if eq "development" .Chart.AppVersion }} + - image: gcr.io/kubecost1/frontend-nightly:latest + {{- else }} + - image: {{ .Values.kubecostFrontend.image }}:prod-{{ $.Chart.AppVersion }} + {{- end }} + {{- else }} + - image: gcr.io/kubecost1/frontend:prod-{{ $.Chart.AppVersion }} + {{- end }} + name: cost-analyzer-frontend + ports: + - name: tcp-frontend + containerPort: 9090 + protocol: TCP + env: + - name: GET_HOSTS_FROM + value: dns + {{- if .Values.kubecostFrontend.extraEnv -}} + {{ toYaml .Values.kubecostFrontend.extraEnv | nindent 12 }} + {{- end }} + {{- if .Values.kubecostFrontend.securityContext }} + securityContext: + {{- toYaml .Values.kubecostFrontend.securityContext | nindent 12 }} + {{- else }} + securityContext: + {{- toYaml .Values.global.containerSecurityContext | nindent 12 }} + {{- end }} + volumeMounts: + - name: tmp + mountPath: /tmp + - name: nginx-conf + mountPath: /etc/nginx/conf.d/ + {{- if .Values.global.containerSecuritycontext }} + - mountPath: /var/cache/nginx + name: cache + - mountPath: /var/run + name: var-run + {{- end }} + {{- if .Values.kubecostFrontend.tls }} + {{- if .Values.kubecostFrontend.tls.enabled }} + - name: tls + mountPath: /etc/ssl/certs + {{- end }} + {{- end }} + resources: + {{- toYaml .Values.kubecostFrontend.resources | nindent 12 }} + {{- if .Values.kubecostFrontend.imagePullPolicy }} + imagePullPolicy: {{ .Values.kubecostFrontend.imagePullPolicy }} + {{- else }} + imagePullPolicy: Always + {{- end }} + {{- if .Values.kubecostFrontend.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /healthz + port: 9090 + initialDelaySeconds: {{ .Values.kubecostFrontend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.kubecostFrontend.readinessProbe.periodSeconds }} + failureThreshold: {{ .Values.kubecostFrontend.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.kubecostFrontend.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /healthz + port: 9090 + initialDelaySeconds: {{ .Values.kubecostFrontend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.kubecostFrontend.livenessProbe.periodSeconds }} + failureThreshold: {{ .Values.kubecostFrontend.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.global.containerSecuritycontext }} + securityContext: + {{- toYaml 
.Values.global.containerSecuritycontext | nindent 12 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.priority }} + {{- if .Values.priority.enabled }} + {{- if gt (len .Values.priority.name) 0 }} + priorityClassName: {{ .Values.priority.name }} + {{- else }} + priorityClassName: {{ template "cost-analyzer.fullname" . }}-priority + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/frontend-service-template.yaml b/charts/kubecost/cost-analyzer/templates/frontend-service-template.yaml new file mode 100644 index 000000000..22c2d4fde --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/frontend-service-template.yaml @@ -0,0 +1,53 @@ +{{- if eq (include "frontend.deployMethod" .) "haMode" }} +kind: Service +apiVersion: v1 +metadata: + name: {{ template "frontend.serviceName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "cost-analyzer.commonLabels" . | nindent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + selector: + {{- include "frontend.selectorLabels" . | nindent 4 }} +{{- if .Values.service -}} +{{- if .Values.service.type }} + type: "{{ .Values.service.type }}" +{{- else }} + type: ClusterIP +{{- end }} +{{- else }} + type: ClusterIP +{{- end }} +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} +{{- end }} + ports: + - name: tcp-frontend + {{- if (eq .Values.service.type "NodePort") }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- if .Values.service.sessionAffinity.enabled }} + sessionAffinity: ClientIP + {{- if .Values.service.sessionAffinity.timeoutSeconds }} + sessionAffinityConfig: + clientIP: + timeoutSeconds: {{ .Values.service.sessionAffinity.timeoutSeconds }} + {{- end }} +{{- else }} + sessionAffinity: None +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/install-plugins.yaml b/charts/kubecost/cost-analyzer/templates/install-plugins.yaml new file mode 100644 index 000000000..f2abf1c41 --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/install-plugins.yaml @@ -0,0 +1,43 @@ +{{- if .Values.kubecostModel.plugins.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cost-analyzer.fullname" . }}-install-plugins + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +data: + install_plugins.sh: |- + {{- if .Values.kubecostModel.plugins.install.enabled }} + set -ex + rm -f {{ .Values.kubecostModel.plugins.folder }}/bin/* + mkdir -p {{ .Values.kubecostModel.plugins.folder }}/bin + cd {{ .Values.kubecostModel.plugins.folder }}/bin + OSTYPE=$(cat /etc/os-release) + OS='' + case "$OSTYPE" in + *Linux*) OS='linux';; + *) echo "$OSTYPE is unsupported" && exit 1 ;; + esac + + UNAME_OUTPUT=$(uname -m) + ARCH='' + case "$UNAME_OUTPUT" in + *x86_64*) ARCH='amd64';; + *amd64*) ARCH='amd64';; + *aarch64*) ARCH='arm64';; + *arm64*) ARCH='arm64';; + *) echo "$UNAME_OUTPUT is unsupported" && exit 1 ;; + esac + + {{- if .Values.kubecostModel.plugins.version }} + VER={{ .Values.kubecostModel.plugins.version | quote}} + {{- else }} + VER=$(curl --silent https://api.github.com/repos/opencost/opencost-plugins/releases/latest | grep ".tag_name" | awk -F\" '{print $4}') + {{- end }} + + {{- range $pluginName := .Values.kubecostModel.plugins.enabledPlugins }} + curl -fsSLO "https://github.com/opencost/opencost-plugins/releases/download/$VER/{{ $pluginName }}.ocplugin.$OS.$ARCH" + chmod a+rx "{{ $pluginName }}.ocplugin.$OS.$ARCH" + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-actions-configmap.yaml b/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-actions-config.yaml similarity index 77% rename from charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-actions-configmap.yaml rename to charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-actions-config.yaml index e5c0f7705..114f381b0 100644 --- a/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-actions-configmap.yaml +++ b/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-actions-config.yaml @@ -1,3 +1,5 @@ +{{- if .Values.clusterController }} +{{- if .Values.clusterController.enabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -38,4 +40,17 @@ metadata: binaryData: config: | {{- toJson .Values.clusterController.actionConfigs.containerRightsize | b64enc | nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} +{{- range .Values.clusterController.actionConfigs.clusterTurndown }} +--- +apiVersion: kubecost.com/v1alpha1 +kind: TurndownSchedule +metadata: + name: {{ .name }} +spec: + start: {{ .start }} + end: {{ .end }} + repeat: {{ .repeat }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-template.yaml b/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-template.yaml index ce1691ef5..ac86658be 100644 --- a/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/kubecost-cluster-controller-template.yaml @@ -289,86 +289,5 @@ spec: targetPort: 9731 selector: app: {{ template "kubecost.clusterControllerName" . }} ---- -# TurndownSchedule Custom Resource Definition for persistence -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: turndownschedules.kubecost.com - labels: - {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} -spec: - group: kubecost.com - names: - kind: TurndownSchedule - singular: turndownschedule - plural: turndownschedules - shortNames: - - td - - tds - scope: Cluster - versions: - - name: v1alpha1 - served: true - storage: true - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - start: - type: string - format: date-time - end: - type: string - format: date-time - repeat: - type: string - enum: [none, daily, weekly] - status: - type: object - properties: - state: - type: string - lastUpdated: - format: date-time - type: string - current: - type: string - scaleDownId: - type: string - nextScaleDownTime: - format: date-time - type: string - scaleDownMetadata: - additionalProperties: - type: string - type: object - scaleUpID: - type: string - nextScaleUpTime: - format: date-time - type: string - scaleUpMetadata: - additionalProperties: - type: string - type: object - additionalPrinterColumns: - - name: State - type: string - description: The state of the turndownschedule - jsonPath: .status.state - - name: Next Turndown - type: string - description: The next turndown date-time - jsonPath: .status.nextScaleDownTime - - name: Next Turn Up - type: string - description: The next turn up date-time - jsonPath: .status.nextScaleUpTime {{- end }} {{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/plugins-config.yaml b/charts/kubecost/cost-analyzer/templates/plugins-config.yaml new file mode 100644 index 000000000..5cc312e8a --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/plugins-config.yaml @@ -0,0 +1,14 @@ +{{- if .Values.kubecostModel.plugins.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.kubecostModel.plugins.configSecret }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +data: + {{- range $key, $config := .Values.kubecostModel.plugins.configs }} + {{ $key }}_config.json: + {{ $config | b64enc | indent 4}} + {{- end }} +{{- end }} + diff --git a/charts/kubecost/cost-analyzer/templates/prometheus-server-deployment.yaml b/charts/kubecost/cost-analyzer/templates/prometheus-server-deployment.yaml index 2151b4f8a..f7413476f 100644 --- a/charts/kubecost/cost-analyzer/templates/prometheus-server-deployment.yaml +++ b/charts/kubecost/cost-analyzer/templates/prometheus-server-deployment.yaml @@ -28,6 +28,10 @@ spec: {{ toYaml .Values.prometheus.server.podAnnotations | indent 8 }} {{- end }} labels: + {{/* + Force pod restarts on upgrades to ensure the configmap is current + */}} + helm-rollout-restarter: {{ randAlphaNum 5 | quote }} {{- include "prometheus.server.labels" . 
| nindent 8 }} {{- if .Values.prometheus.server.podLabels}} {{ toYaml .Values.prometheus.server.podLabels | nindent 8 }} diff --git a/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml b/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml index a4490afe8..f9dc538cd 100644 --- a/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml +++ b/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml @@ -67,7 +67,7 @@ kubecostModel: # memory: "256Mi" forecasting: - fullImageName: public.ecr.aws/kubecost/kubecost-modeling:v0.1.2 + fullImageName: public.ecr.aws/kubecost/kubecost-modeling:v0.1.5 networkCosts: enabled: false @@ -125,7 +125,7 @@ prometheus: # clusterIDConfigmap: cluster-id-configmap image: repository: public.ecr.aws/kubecost/prometheus - tag: v2.49.1 + tag: v2.50.1 resources: {} # limits: # cpu: 500m diff --git a/charts/kubecost/cost-analyzer/values.yaml b/charts/kubecost/cost-analyzer/values.yaml index 98a92d926..421d25ad1 100644 --- a/charts/kubecost/cost-analyzer/values.yaml +++ b/charts/kubecost/cost-analyzer/values.yaml @@ -249,6 +249,11 @@ global: enabled: false # Set to true when using affected CI/CD tools for access to the below configuration options. skipSanityChecks: false # If true, skip all sanity/existence checks for resources like Secrets. +## Provide a name override for the chart. +# nameOverride: "" +## Provide a full name override option for the chart. +# fullnameOverride: "" + ## This flag is only required for users upgrading to a new version of Kubecost. ## The flag is used to ensure users are aware of important ## (potentially breaking) changes included in the new version. @@ -363,6 +368,8 @@ systemProxy: kubecostFrontend: enabled: true + deployMethod: singlepod haMode or singlepod - haMode is currently only supported with Enterprise tier + haReplicas: 2 # only used with haMode image: "gcr.io/kubecost1/frontend" imagePullPolicy: Always # fullImageName overrides the default image construction logic. The exact @@ -381,19 +388,25 @@ kubecostFrontend: # limits: # cpu: "100m" # memory: "256Mi" + deploymentStrategy: {} + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 1 + # type: RollingUpdate + # Define a readiness probe for the Kubecost frontend container. readinessProbe: enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - failureThreshold: 200 + initialDelaySeconds: 1 + periodSeconds: 5 + failureThreshold: 6 # Define a liveness probe for the Kubecost frontend container. livenessProbe: enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - failureThreshold: 200 + initialDelaySeconds: 1 + periodSeconds: 5 + failureThreshold: 6 ipv6: enabled: true # disable if the cluster does not support ipv6 # timeoutSeconds: 600 # should be rarely used, but can be increased if needed @@ -423,7 +436,6 @@ kubecostFrontend: # clusterController: # fqdn: cluster-controller.kubecost.svc.cluster.local:9731 - # Kubecost Metrics deploys a separate pod which will emit kubernetes specific metrics required # by the cost-model. This pod is designed to remain active and decoupled from the cost-model itself. # However, disabling this service/pod deployment will flag the cost-model to emit the metrics instead. 
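Editor's note: the frontend HA plumbing introduced above (the new frontend Deployment and Service templates, plus the `frontend.deployMethod` switch in the ingress and cost-analyzer Service) is driven purely by values. Below is a minimal sketch of an override that opts into the HA frontend, assuming an Enterprise-tier install as the inline comment requires; the replica count and strategy values are illustrative only.

```yaml
# Illustrative values override, not part of the chart itself.
kubecostFrontend:
  enabled: true
  deployMethod: haMode      # default is singlepod; haMode requires the Enterprise tier
  haReplicas: 3             # only used with haMode; chart default is 2
  deploymentStrategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
```

With haMode set, the ingress template above resolves `frontend.serviceName` instead of `cost-analyzer.serviceName`, and the cost-analyzer Service no longer exposes the tcp-frontend port.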
@@ -516,6 +528,8 @@ kubecostModel: etlDailyStoreDurationDays: 91 # The total number of hours the ETL pipelines will build # Set to 0 to disable hourly ETL (not recommended) + # Must be < prometheus server retention, otherwise empty data may overwrite + # known-good data etlHourlyStoreDurationHours: 49 # The total number of weeks the ETL pipelines will build # Set to 0 to disable weekly ETL (not recommended) @@ -530,6 +544,43 @@ kubecostModel: # under a key named federated-store.yaml. # federatedStorageConfigSecret: "" + # Installs Kubecost/OpenCost plugins + plugins: + enabled: true + install: + enabled: true + fullImageName: curlimages/curl:latest + securityContext: + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + folder: /opt/opencost/plugin + + # leave this commented to always download most recent version of plugins + # version: + + # the list of enabled plugins + enabledPlugins: [] + # - datadog + + # pre-existing secret for plugin configuration + configSecret: kubecost-plugin-secret + + # uncomment this to define plugin configuration via the values file + # configs: + # datadog: | + # { + # "datadog_site": "", + # "datadog_api_key": "", + # "datadog_app_key": "" + # } + ## Feature to view your out-of-cluster costs and their k8s utilization ## Ref: https://docs.kubecost.com/using-kubecost/navigating-the-kubecost-ui/cloud-costs-explorer cloudCost: @@ -559,7 +610,7 @@ kubecostModel: # please monitor Kubecost logs, Thanos query logs, and Thanos load closely. # We hope to make major improvements at scale here soon! # - # containerStatsEnabled: false + containerStatsEnabled: true # enabled by default as of v2.2.0 # max number of concurrent Prometheus queries maxQueryConcurrency: 5 @@ -713,6 +764,7 @@ persistentVolume: # existingClaim: kubecost-cost-analyzer # a claim in the same namespace as kubecost labels: {} annotations: {} + # helm.sh/resource-policy: keep # https://helm.sh/docs/howto/charts_tips_and_tricks/#tell-helm-not-to-uninstall-a-resource # Enables a separate PV specifically for ETL data. This should be avoided, but # is kept for legacy compatibility. @@ -722,14 +774,20 @@ service: type: ClusterIP port: 9090 targetPort: 9090 - # nodePort: + nodePort: {} labels: {} annotations: {} + # loadBalancerSourceRanges: [] sessionAffinity: enabled: false # Makes sure that connections from a client are passed to the same Pod each time, when set to `true`. You should set it when you enabled authentication through OIDC or SAML integration. timeoutSeconds: 10800 prometheus: + ## Provide a full name override for Prometheus. + # fullnameOverride: "" + ## Provide a name override for Prometheus. + # nameOverride: "" + rbac: create: true # Create the RBAC resources for Prometheus. @@ -789,6 +847,9 @@ prometheus: # NOTE: This does not affect the external_labels set in prometheus config. # clusterIDConfigmap: cluster-id-configmap + ## Provide a full name override for the Prometheus server. 
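Editor's note: to tie the plugin pieces above together (the install-plugins ConfigMap, the plugins-config Secret template, and the `kubecostModel.plugins` values), here is a hedged sketch of a values override enabling the datadog plugin named in the commented example; the keys mirror the commented defaults and are illustrative, not a recommendation.

```yaml
# Sketch only. Inline `configs` are rendered by plugins-config.yaml into the Secret
# named by `configSecret`; alternatively, pre-create that Secret and omit `configs`.
kubecostModel:
  plugins:
    enabled: true
    install:
      enabled: true           # downloads the plugin binaries via the curl script above
    enabledPlugins:
      - datadog
    configSecret: kubecost-plugin-secret
    configs:
      datadog: |
        {
          "datadog_site": "",
          "datadog_api_key": "",
          "datadog_app_key": ""
        }
```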
+ # fullnameOverride: "" + ## Prometheus server container name ## enabled: true @@ -802,7 +863,7 @@ prometheus: ## image: repository: quay.io/prometheus/prometheus - tag: v2.49.1 + tag: v2.50.1 pullPolicy: IfNotPresent ## prometheus server priorityClassName @@ -1009,6 +1070,7 @@ prometheus: ## Prometheus server data Persistent Volume annotations ## annotations: {} + # helm.sh/resource-policy: keep # https://helm.sh/docs/howto/charts_tips_and_tricks/#tell-helm-not-to-uninstall-a-resource ## Prometheus server data Persistent Volume existing claim name ## Requires server.persistentVolume.enabled: true @@ -1088,12 +1150,12 @@ prometheus: ## Prometheus server readiness and liveness probe initial delay and timeout ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ ## - readinessProbeInitialDelay: 30 - readinessProbeTimeout: 30 + readinessProbeInitialDelay: 5 + readinessProbeTimeout: 3 readinessProbeFailureThreshold: 3 readinessProbeSuccessThreshold: 1 - livenessProbeInitialDelay: 30 - livenessProbeTimeout: 30 + livenessProbeInitialDelay: 5 + livenessProbeTimeout: 3 livenessProbeFailureThreshold: 3 livenessProbeSuccessThreshold: 1 @@ -1163,9 +1225,32 @@ prometheus: ## terminationGracePeriodSeconds: 300 - ## Prometheus data retention period (default if not specified is 15 days) + ## Prometheus data retention period (default if not specified is 97 hours) ## - retention: 15d # 50h. This must be greater than or equal to etlHourlyStoreDurationHours + ## Kubecost builds up its own persistent store of metric data on the + ## filesystem (usually a PV) and, when using ETL Backup and/or Federated + ## ETL, in more durable object storage like S3 or GCS. Kubecost's data + ## retention is _not_ tied to the configured Prometheus retention. + ## + ## For data durability, we recommend using ETL Backup instead of relying on + ## Prometheus retention. + ## + ## Lower retention values will affect Prometheus by reducing resource + ## consumption and increasing stability. It _must not_ be set below or equal + ## to kubecostModel.etlHourlyStoreDurationHours, otherwise empty data sets + ## may overwrite good data sets. For now, it must also be >= 49h for Daily + ## ETL stability. + ## + ## "ETL Rebuild" and "ETL Repair" is only possible on data available within + ## this retention window. This is an extremely rare operation. + ## + ## If you want maximum security in the event of a Kubecost agent + ## (cost-model) outage, increase this value. The current default of 97h is + ## intended to balance Prometheus stability and resource consumption + ## against the event of an outage in Kubecost which would necessitate a + ## version change. 4 days should provide enough time for most users to + ## notice a problem and initiate corrective action. + retention: 97h # retentionSize: should be significantly greater than the storage used in the number of hours set in etlHourlyStoreDurationHours # Install Prometheus Alert Manager @@ -1174,6 +1259,9 @@ prometheus: ## enabled: false + ## Provide a full name override for Prometheus alertmanager. 
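Editor's note: since the new retention comment ties Prometheus retention to the hourly ETL window, a compact consistency sketch using the chart defaults quoted above:

```yaml
# retention must be strictly greater than etlHourlyStoreDurationHours,
# and >= 49h is also required for Daily ETL stability.
kubecostModel:
  etlHourlyStoreDurationHours: 49
prometheus:
  server:
    retention: 97h
```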
+ # fullnameOverride: "" + strategy: type: Recreate rollingUpdate: null @@ -1186,7 +1274,7 @@ prometheus: ## image: repository: quay.io/prometheus/alertmanager - tag: v0.26.0 + tag: v0.27.0 pullPolicy: IfNotPresent ## alertmanager priorityClassName @@ -1472,7 +1560,7 @@ prometheus: ## image: repository: quay.io/prometheus-operator/prometheus-config-reloader - tag: v0.71.2 + tag: v0.72.0 pullPolicy: IfNotPresent ## Additional configmap-reload container arguments @@ -1512,7 +1600,7 @@ prometheus: ## image: repository: quay.io/prometheus-operator/prometheus-config-reloader - tag: v0.71.2 + tag: v0.72.0 pullPolicy: IfNotPresent ## Additional configmap-reload container arguments @@ -1545,6 +1633,9 @@ prometheus: ## enabled: false + ## Provide a full name override for node exporter. + # fullnameOverride: "" + ## If true, node-exporter pods share the host network namespace ## hostNetwork: true @@ -1676,6 +1767,9 @@ prometheus: ## enabled: false + ## Provide a full name override for Prometheus push gateway. + # fullnameOverride: "" + ## Use an alternate scheduler, e.g. "stork". ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## @@ -2126,6 +2220,8 @@ networkCosts: # every 30 minutes. trafficLogging: true + logLevel: info + # Port will set both the containerPort and hostPort to this value. # These must be identical due to network-costs being run on hostNetwork port: 3001 @@ -2249,9 +2345,6 @@ networkCosts: ## Used for HA mode in Business & Enterprise tier ## kubecostDeployment: - # Instead of a kubecost-analyzer Deployment, you can set it to be a StatefulSet as for volumeClaimTemplates usage and real stateful behaviour - statefulSet: - enabled: false replicas: 1 # deploymentStrategy: # rollingUpdate: @@ -2261,6 +2354,7 @@ kubecostDeployment: labels: {} annotations: {} + ## Kubecost Forecasting forecasts future cost patterns based on historical ## patterns observed by Kubecost. forecasting: @@ -2270,7 +2364,7 @@ forecasting: # image provided (registry, image, tag) will be used for the forecasting # container. # Example: fullImageName: gcr.io/kubecost1/forecasting:v0.0.1 - fullImageName: gcr.io/kubecost1/kubecost-modeling:v0.1.3 + fullImageName: gcr.io/kubecost1/kubecost-modeling:v0.1.5 # Resource specification block for the forecasting container. resources: @@ -2380,6 +2474,11 @@ kubecostAggregator: periodSeconds: 10 failureThreshold: 200 + ## Add a priority class to the aggregator pod + # priority: + # enabled: false + # name: "" + # extraEnv: # - name: SOME_VARIABLE # value: "some_value" @@ -2407,7 +2506,22 @@ kubecostAggregator: # port: 40000 # targetPort: 40000 # containerPort: 40000 - securityContext: {} # Define a securityContext for the aggregator pod. This will take highest precedence. + ## Define a securityContext for the aggregator pod. This will take highest precedence. + securityContext: {} + ## Define the container-level security context for the aggregator pod. This will take highest precedence. + # containerSecurityContext: {} + + ## Provide a Service Account name for aggregator. + # serviceAccountName: "" + + ## Define a nodeSelector for the aggregator pod + # nodeSelector: {} + + ## Define tolerations for the aggregator pod + # tolerations: [] + + ## Define Pod affinity for the aggregator pod + # affinity: {} ## Creates a new container/pod to retrieve CloudCost data. By default it uses ## the same serviceaccount as the cost-analyzer pod. 
A custom serviceaccount @@ -2432,6 +2546,26 @@ kubecostAggregator: periodSeconds: 10 failureThreshold: 200 + ## Add a nodeSelector for aggregator cloud costs + # nodeSelector: {} + + ## Tolerations for the aggregator cloud costs + # tolerations: {} + + ## Affinity for the aggregator cloud costs + # affinity: {} + + ## ServiceAccount for the aggregator cloud costs + # serviceAccountName: "" + + ## Define environment variables for cloud cost + # env: {} + + ## Configure the Collections service for aggregator. + # collections: + # cache: + # enabled: false + # Jaeger is an optional container attached to wherever the Aggregator # container is running. It is used for performance investigation. Enable if # Kubecost Support asks. @@ -2486,12 +2620,15 @@ diagnostics: tolerations: {} affinity: {} +## Provide a full name override for the diagnostics Deployment. +# diagnosticsFullnameOverride: "" + # Kubecost Cluster Controller for Right Sizing and Cluster Turndown clusterController: enabled: false image: repository: gcr.io/kubecost1/cluster-controller - tag: v0.15.2 + tag: v0.16.0 imagePullPolicy: Always ## PriorityClassName ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass @@ -2499,6 +2636,17 @@ clusterController: # Set custom tolerations for the cluster controller. tolerations: [] actionConfigs: + # this configures the Kubecost Cluster Turndown action + # for more details, see documentation at https://github.com/kubecost/cluster-turndown/tree/develop?tab=readme-ov-file#setting-a-turndown-schedule + clusterTurndown: [] + # - name: my-schedule + # start: "2024-02-09T00:00:00Z" + # end: "2024-02-09T12:00:00Z" + # repeat: daily + # - name: my-schedule2 + # start: "2024-02-09T00:00:00Z" + # end: "2024-02-09T01:00:00Z" + # repeat: weekly # this configures the Kubecost Namespace Turndown action # for more details, see documentation at https://docs.kubecost.com/using-kubecost/navigating-the-kubecost-ui/savings/savings-actions#namespace-turndown namespaceTurndown: @@ -2528,21 +2676,54 @@ clusterController: # allowSharedCore: false # allowCostIncrease: false # recommendationType: '' - # this configures the Kubecost Request Sizing action - # for more details, see documentation at https://docs.kubecost.com/using-kubecost/navigating-the-kubecost-ui/savings/savings-actions#automated-request-sizing + # This configures the Kubecost Continuous Request Sizing Action + # + # Using this configuration overrides annotation-based configuration of + # Continuous Request Sizing. Annotation configuration will be ignored while + # this configuration method is present in the cluster. + # + # For more details, see documentation at https://docs.kubecost.com/using-kubecost/navigating-the-kubecost-ui/savings/savings-actions#automated-request-sizing containerRightsize: + # Workloads can be selected by an _exact_ key (namespace, controllerKind, + # controllerName). This will only match a single controller. The cluster + # ID is current irrelevant because Cluster Controller can only modify + # workloads within the cluster it is running in. # workloads: # - clusterID: cluster-one # namespace: my-namespace # controllerKind: deployment # controllerName: my-controller + # An alternative to exact key selection is filter selection. 
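Editor's note: for the `clusterTurndown` action added above, the new range block in kubecost-cluster-controller-actions-config.yaml renders each list entry as a TurndownSchedule custom resource (the inline TurndownSchedule CRD was removed from the cluster-controller template above). Roughly, the first commented example would render as the following, assuming `clusterController.enabled: true`:

```yaml
apiVersion: kubecost.com/v1alpha1
kind: TurndownSchedule
metadata:
  name: my-schedule
spec:
  start: 2024-02-09T00:00:00Z
  end: 2024-02-09T12:00:00Z
  repeat: daily
```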
The filters + # are syntactically identical to Kubecost's "v2" filters [1] but only + # support a small set of filter fields, those being: + # - namespace + # - controllerKind + # - controllerName + # - label + # - annotation + # + # If multiple filters are listed, they will be ORed together at the top + # level. + # + # See the examples below. + # + # [1] https://docs.kubecost.com/apis/apis-overview/filters-api + # filterConfig: + # - filter: | + # namespace:"abc"+controllerKind:"deployment" + # - filter: | + # controllerName:"abc123"+controllerKind:"daemonset" + # - filter: | + # namespace:"foo"+controllerKind!:"statefulset" + # - filter: | + # namespace:"bar","baz" # schedule: # start: "2024-01-30T15:04:05Z" # frequencyMinutes: 5 # recommendationQueryWindow: "48h" # lastModified: '' - # targetUtilizationCPU: 0.8 - # targetUtilizationMemory: 0.8 + # targetUtilizationCPU: 0.8 # results in a cpu request setting that is 20% higher than the max seen over last 48h + # targetUtilizationMemory: 0.8 # results in a RAM request setting that is 20% higher than the max seen over last 48h kubescaler: # If true, will cause all (supported) workloads to be have their requests @@ -2598,6 +2779,15 @@ grafana: rbac: create: true + serviceAccount: + create: true + name: "" + + ## Provide a full name override for the Grafana Deployment. + # fullnameOverride: "" + ## Provide a name override for the Grafana Deployment. + # nameOverride: "" + ## Configure grafana datasources ## ref: http://docs.grafana.org/administration/provisioning/#datasources ## @@ -2640,7 +2830,7 @@ grafana: ## Container image settings for the Grafana deployment image: repository: grafana/grafana - tag: 10.3.1 + tag: 10.3.4 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -2834,7 +3024,7 @@ grafana: sidecar: image: repository: kiwigrid/k8s-sidecar - tag: 1.25.4 + tag: 1.26.0 pullPolicy: IfNotPresent resources: {} dashboards: @@ -2894,6 +3084,8 @@ awsstore: # Use a custom nodeSelector for AWSStore. nodeSelector: {} # kubernetes.io/arch: amd64 + ## Annotations for the AWSStore ServiceAccount. + annotations: {} ## Federated ETL Architecture ## Ref: https://docs.kubecost.com/install-and-configure/install/multi-cluster/federated-etl @@ -2935,113 +3127,123 @@ costEventsAudit: ## # readonly: false -# These configs can also be set from the Settings page in the Kubecost product UI -# Values in this block override config changes in the Settings UI on pod restart -# +# # These configs can also be set from the Settings page in the Kubecost product +# # UI. Values in this block override config changes in the Settings UI on pod +# # restart # kubecostProductConfigs: -# An optional list of cluster definitions that can be added for frontend access. The local -# cluster is *always* included by default, so this list is for non-local clusters. -# Ref: https://github.com/kubecost/docs/blob/main/multi-cluster.md - # clusters: - # - name: "Cluster A" - # address: http://cluster-a.kubecost.com:9090 - # # Optional authentication credentials - only basic auth is currently supported. 
- # auth: - # type: basic - # # Secret name should be a secret formatted based on: https://github.com/kubecost/docs/blob/main/ingress-examples.md - # secretName: cluster-a-auth - # # Or pass auth directly as base64 encoded user:pass - # data: YWRtaW46YWRtaW4= - # # Or user and pass directly - # user: admin - # pass: admin - # - name: "Cluster B" - # address: http://cluster-b.kubecost.com:9090 - # defaultModelPricing: # default monthly resource prices, used predominately for on-prem clusters. Use quotes if setting "0.00" for any item. - # CPU: 28.0 - # spotCPU: 4.86 - # RAM: 3.09 - # spotRAM: 0.65 - # GPU: 693.50 - # spotGPU: 225.0 - # storage: 0.04 - # zoneNetworkEgress: 0.01 - # regionNetworkEgress: 0.01 - # internetNetworkEgress: 0.12 - # enabled: true - # # The cluster profile represents a predefined set of parameters to use when calculating savings. - # # Possible values are: [ development, production, high-availability ] - # clusterProfile: production - # customPricesEnabled: false # This makes the default view custom prices-- generally used for on-premises clusters - # spotLabel: lifecycle - # spotLabelValue: Ec2Spot - # gpuLabel: gpu - # gpuLabelValue: true - # awsServiceKeyName: ACCESSKEYID - # awsServiceKeyPassword: fakepassword # Only use if your values.yaml are stored encrypted. Otherwise provide an existing secret via serviceKeySecretName - # awsSpotDataRegion: us-east-1 - # awsSpotDataBucket: spot-data-feed-s3-bucket - # awsSpotDataPrefix: dev - # athenaProjectID: "530337586277" # The AWS AccountID where the Athena CUR is. Generally your masterpayer account - # athenaBucketName: "s3://aws-athena-query-results-530337586277-us-east-1" - # athenaRegion: us-east-1 - # athenaDatabase: athenacurcfn_athena_test1 - # athenaTable: "athena_test1" - # athenaWorkgroup: "primary" # The default workgroup in AWS is 'primary' - # masterPayerARN: "" - # projectID: "123456789" # Also known as AccountID on AWS -- the current account/project that this instance of Kubecost is deployed on. - # gcpSecretName: gcp-secret # Name of a secret representing the gcp service key - # gcpSecretKeyName: compute-viewer-kubecost-key.json # Name of the secret's key containing the gcp service key - # bigQueryBillingDataDataset: billing_data.gcp_billing_export_v1_01AC9F_74CF1D_5565A2 - # labelMappingConfigs: # names of k8s labels or annotations used to designate different allocation concepts - # enabled: true - # owner_label: "owner" - # team_label: "team" - # department_label: "dept" - # product_label: "product" - # environment_label: "env" - # namespace_external_label: "kubernetes_namespace" # external labels/tags are used to map external cloud costs to kubernetes concepts - # cluster_external_label: "kubernetes_cluster" - # controller_external_label: "kubernetes_controller" - # product_external_label: "kubernetes_label_app" - # service_external_label: "kubernetes_service" - # deployment_external_label: "kubernetes_deployment" - # owner_external_label: "kubernetes_label_owner" - # team_external_label: "kubernetes_label_team" - # environment_external_label: "kubernetes_label_env" - # department_external_label: "kubernetes_label_department" - # statefulset_external_label: "kubernetes_statefulset" - # daemonset_external_label: "kubernetes_daemonset" - # pod_external_label: "kubernetes_pod" - # grafanaURL: "" - # # Provide a mapping from Account ID to a readable Account Name in a key/value object. 
Provide Account IDs as they are displayed in CloudCost - # # as the 'key' and the Account Name associated with it as the 'value' - # cloudAccountMapping: - # EXAMPLE_ACCOUNT_ID: EXAMPLE_ACCOUNT_NAME - # clusterName: "" # clusterName is the default context name in settings. - # clusterAccountID: "" # Manually set Account property for assets - # currencyCode: "USD" # official support for USD, AUD, BRL, CAD, CHF, CNY, DKK, EUR, GBP, IDR, INR, JPY, NOK, PLN, SEK - # azureBillingRegion: US # Represents 2-letter region code, e.g. West Europe = NL, Canada = CA. ref: https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes - # azureSubscriptionID: 0bd50fdf-c923-4e1e-850c-196dd3dcc5d3 - # azureClientID: f2ef6f7d-71fb-47c8-b766-8d63a19db017 - # azureTenantID: 72faf3ff-7a3f-4597-b0d9-7b0b201bb23a - # azureClientPassword: fake key # Only use if your values.yaml are stored encrypted. Otherwise provide an existing secret via serviceKeySecretName - # azureOfferDurableID: "MS-AZR-0003p" - # discount: "" # percentage discount applied to compute - # negotiatedDiscount: "" # custom negotiated cloud provider discount - # defaultIdle: false - # serviceKeySecretName: "" # Use an existing AWS or Azure secret with format as in aws-service-key-secret.yaml or azure-service-key-secret.yaml. Leave blank if using createServiceKeySecret - # createServiceKeySecret: true # Creates a secret representing your cloud service key based on data in values.yaml. If you are storing unencrypted values, add a secret manually - # sharedNamespaces: "" # namespaces with shared workloads, example value: "kube-system\,ingress-nginx\,kubecost\,monitoring" - # sharedOverhead: "" # value representing a fixed external cost per month to be distributed among aggregations. - # shareTenancyCosts: true # enable or disable sharing costs such as cluster management fees (defaults to "true" on Settings page) - # metricsConfigs: # configuration for metrics emitted by Kubecost - # disabledMetrics: [] # list of metrics that Kubecost will not emit. Note that disabling metrics can lead to unexpected behavior in the cost-model. - # productKey: # apply business or enterprise product license - # key: "" - # enabled: false - # secretname: productkeysecret # create a secret out of a file named productkey.json of format { "key": "kc-b1325234" }. If the secretname is specified, a configmap with the key will not be created - # mountPath: "/some/custom/path/productkey.json" # (use instead of secretname) declare the path at which the product key file is mounted (eg. by a secrets provisioner). The file must be of format { "key": "kc-b1325234" } +# # An optional list of cluster definitions that can be added for frontend +# # access. The local cluster is *always* included by default, so this list is +# # for non-local clusters. +# clusters: +# - name: "Cluster A" +# address: http://cluster-a.kubecost.com:9090 +# # Optional authentication credentials - only basic auth is currently supported. +# auth: +# type: basic +# # Secret name should be a secret formatted based on: https://github.com/kubecost/docs/blob/main/ingress-examples.md +# secretName: cluster-a-auth +# # Or pass auth directly as base64 encoded user:pass +# data: YWRtaW46YWRtaW4= +# # Or user and pass directly +# user: admin +# pass: admin +# - name: "Cluster B" +# address: http://cluster-b.kubecost.com:9090 +# # Enabling customPricesEnabled and defaultModelPricing instructs Kubecost to +# # use these custom monthly resource prices when reporting node costs. 
Note, +# # that the below configuration is for the monthly cost of the resource. +# # Kubecost considers there to be 730 hours in a month. Also note, that these +# # configurations will have no effect on metrics emitted such as +# # `node_ram_hourly_cost` or `node_cpu_hourly_cost`. +# # Ref: https://docs.kubecost.com/install-and-configure/install/provider-installations/air-gapped +# customPricesEnabled: false +# defaultModelPricing: +# enabled: true +# CPU: "28.0" +# spotCPU: "4.86" +# RAM: "3.09" +# spotRAM: "0.65" +# GPU: "693.50" +# spotGPU: "225.0" +# storage: "0.04" +# zoneNetworkEgress: "0.01" +# regionNetworkEgress: "0.01" +# internetNetworkEgress: "0.12" +# # The cluster profile represents a predefined set of parameters to use when calculating savings. +# # Possible values are: [ development, production, high-availability ] +# clusterProfile: production +# spotLabel: lifecycle +# spotLabelValue: Ec2Spot +# gpuLabel: gpu +# gpuLabelValue: true +# alibabaServiceKeyName: "" +# alibabaServiceKeyPassword: "" +# awsServiceKeyName: ACCESSKEYID +# awsServiceKeyPassword: fakepassword # Only use if your values.yaml are stored encrypted. Otherwise provide an existing secret via serviceKeySecretName +# awsSpotDataRegion: us-east-1 +# awsSpotDataBucket: spot-data-feed-s3-bucket +# awsSpotDataPrefix: dev +# athenaProjectID: "530337586277" # The AWS AccountID where the Athena CUR is. Generally your masterpayer account +# athenaBucketName: "s3://aws-athena-query-results-530337586277-us-east-1" +# athenaRegion: us-east-1 +# athenaDatabase: athenacurcfn_athena_test1 +# athenaTable: "athena_test1" +# athenaWorkgroup: "primary" # The default workgroup in AWS is 'primary' +# masterPayerARN: "" +# projectID: "123456789" # Also known as AccountID on AWS -- the current account/project that this instance of Kubecost is deployed on. +# gcpSecretName: gcp-secret # Name of a secret representing the gcp service key +# gcpSecretKeyName: compute-viewer-kubecost-key.json # Name of the secret's key containing the gcp service key +# bigQueryBillingDataDataset: billing_data.gcp_billing_export_v1_01AC9F_74CF1D_5565A2 +# labelMappingConfigs: # names of k8s labels or annotations used to designate different allocation concepts +# enabled: true +# owner_label: "owner" +# team_label: "team" +# department_label: "dept" +# product_label: "product" +# environment_label: "env" +# namespace_external_label: "kubernetes_namespace" # external labels/tags are used to map external cloud costs to kubernetes concepts +# cluster_external_label: "kubernetes_cluster" +# controller_external_label: "kubernetes_controller" +# product_external_label: "kubernetes_label_app" +# service_external_label: "kubernetes_service" +# deployment_external_label: "kubernetes_deployment" +# owner_external_label: "kubernetes_label_owner" +# team_external_label: "kubernetes_label_team" +# environment_external_label: "kubernetes_label_env" +# department_external_label: "kubernetes_label_department" +# statefulset_external_label: "kubernetes_statefulset" +# daemonset_external_label: "kubernetes_daemonset" +# pod_external_label: "kubernetes_pod" +# grafanaURL: "" +# # Provide a mapping from Account ID to a readable Account Name in a key/value object. Provide Account IDs as they are displayed in CloudCost +# # as the 'key' and the Account Name associated with it as the 'value' +# cloudAccountMapping: +# EXAMPLE_ACCOUNT_ID: EXAMPLE_ACCOUNT_NAME +# clusterName: "" # clusterName is the default context name in settings. 
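Editor's note, a quick worked example of the monthly-price semantics described above: with `CPU: "28.0"` and the 730 hours Kubecost assumes per month, the implied rate is 28.0 / 730 ≈ $0.0384 per CPU-hour; as the comment states, this does not alter emitted metrics such as `node_cpu_hourly_cost`.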
+# clusterAccountID: "" # Manually set Account property for assets +# currencyCode: "USD" # official support for USD, AUD, BRL, CAD, CHF, CNY, DKK, EUR, GBP, IDR, INR, JPY, NOK, PLN, SEK +# azureBillingRegion: US # Represents 2-letter region code, e.g. West Europe = NL, Canada = CA. ref: https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes +# azureSubscriptionID: 0bd50fdf-c923-4e1e-850c-196dd3dcc5d3 +# azureClientID: f2ef6f7d-71fb-47c8-b766-8d63a19db017 +# azureTenantID: 72faf3ff-7a3f-4597-b0d9-7b0b201bb23a +# azureClientPassword: fake key # Only use if your values.yaml are stored encrypted. Otherwise provide an existing secret via serviceKeySecretName +# azureOfferDurableID: "MS-AZR-0003p" +# discount: "" # percentage discount applied to compute +# negotiatedDiscount: "" # custom negotiated cloud provider discount +# defaultIdle: false +# serviceKeySecretName: "" # Use an existing AWS or Azure secret with format as in aws-service-key-secret.yaml or azure-service-key-secret.yaml. Leave blank if using createServiceKeySecret +# createServiceKeySecret: true # Creates a secret representing your cloud service key based on data in values.yaml. If you are storing unencrypted values, add a secret manually +# sharedNamespaces: "" # namespaces with shared workloads, example value: "kube-system\,ingress-nginx\,kubecost\,monitoring" +# sharedOverhead: "" # value representing a fixed external cost per month to be distributed among aggregations. +# shareTenancyCosts: true # enable or disable sharing costs such as cluster management fees (defaults to "true" on Settings page) +# metricsConfigs: # configuration for metrics emitted by Kubecost +# disabledMetrics: [] # list of metrics that Kubecost will not emit. Note that disabling metrics can lead to unexpected behavior in the cost-model. +# productKey: # Apply enterprise product license +# enabled: false +# key: "" +# secretname: productkeysecret # Reference an existing k8s secret created from a file named productkey.json of format { "key": "enterprise-key-here" }. If the secretname is specified, a configmap with the key will not be created. +# mountPath: "/some/custom/path/productkey.json" # (use instead of secretname) Declare the path at which the product key file is mounted (eg. by a secrets provisioner). The file must be of format { "key": "enterprise-key-here" }. +# carbonEstimates: false # Enables Kubecost beta carbon estimation endpoints /assets/carbon and /allocations/carbon ## Specify an existing Kubernetes Secret holding the cloud integration information. This Secret must contain ## a key with name `cloud-integration.json` and the contents must be in a specific format. It is expected @@ -3095,6 +3297,9 @@ costEventsAudit: # ingestPodUID: false # Enables using UIDs to uniquely ID pods. This requires either Kubecost's replicated KSM metrics, or KSM v2.1.0+. This may impact performance, and changes the default cost-model allocation behavior. # regionOverrides: "region1,region2,region3" # list of regions which will override default costmodel provider regions +# Explicit name of the ConfigMap to use for pricing overrides. If not set, a default will apply. 
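Editor's note: the `productKey.secretname` comment above describes a pre-existing Secret built from a productkey.json file. A hedged sketch of what that Secret might look like when defined as a manifest rather than via kubectl; the namespace is an assumption and should match the Kubecost release namespace.

```yaml
# Sketch only; key name and payload format follow the comment above.
apiVersion: v1
kind: Secret
metadata:
  name: productkeysecret
  namespace: kubecost        # assumption: use your release namespace
stringData:
  productkey.json: |
    { "key": "enterprise-key-here" }
```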
+# pricingConfigmapName: "" + # -- Array of extra K8s manifests to deploy ## Note: Supports use of custom Helm templates extraObjects: [] diff --git a/charts/kuma/kuma/Chart.yaml b/charts/kuma/kuma/Chart.yaml index dfe6fb54e..d7b10d57a 100644 --- a/charts/kuma/kuma/Chart.yaml +++ b/charts/kuma/kuma/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/namespace: kuma-system catalog.cattle.io/release-name: kuma apiVersion: v2 -appVersion: 2.6.2 +appVersion: 2.6.4 description: A Helm chart for the Kuma Control Plane home: https://github.com/kumahq/kuma icon: https://kuma.io/assets/images/brand/kuma-logo-new.svg @@ -20,4 +20,4 @@ maintainers: name: nickolaev name: kuma type: application -version: 2.6.2 +version: 2.6.4 diff --git a/charts/kuma/kuma/README.md b/charts/kuma/kuma/README.md index c0423333c..a64ff7704 100644 --- a/charts/kuma/kuma/README.md +++ b/charts/kuma/kuma/README.md @@ -2,7 +2,7 @@ A Helm chart for the Kuma Control Plane -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 2.6.2](https://img.shields.io/badge/Version-2.6.2-informational?style=flat-square) ![AppVersion: 2.6.2](https://img.shields.io/badge/AppVersion-2.6.2-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 2.6.4](https://img.shields.io/badge/Version-2.6.4-informational?style=flat-square) ![AppVersion: 2.6.4](https://img.shields.io/badge/AppVersion-2.6.4-informational?style=flat-square) **Homepage:** diff --git a/charts/linkerd/linkerd-control-plane/Chart.yaml b/charts/linkerd/linkerd-control-plane/Chart.yaml index c50601fc6..4ba8f23d1 100644 --- a/charts/linkerd/linkerd-control-plane/Chart.yaml +++ b/charts/linkerd/linkerd-control-plane/Chart.yaml @@ -6,7 +6,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.22.0-0' catalog.cattle.io/release-name: linkerd-control-plane apiVersion: v2 -appVersion: edge-24.3.4 +appVersion: edge-24.3.5 dependencies: - name: partials repository: file://./charts/partials @@ -26,4 +26,4 @@ name: linkerd-control-plane sources: - https://github.com/linkerd/linkerd2/ type: application -version: 2024.3.4 +version: 2024.3.5 diff --git a/charts/linkerd/linkerd-control-plane/README.md b/charts/linkerd/linkerd-control-plane/README.md index 80069744b..90aa52350 100644 --- a/charts/linkerd/linkerd-control-plane/README.md +++ b/charts/linkerd/linkerd-control-plane/README.md @@ -3,7 +3,7 @@ Linkerd gives you observability, reliability, and security for your microservices — with no code change required. 
-![Version: 2024.3.4](https://img.shields.io/badge/Version-2024.3.4-informational?style=flat-square) +![Version: 2024.3.5](https://img.shields.io/badge/Version-2024.3.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: edge-XX.X.X](https://img.shields.io/badge/AppVersion-edge--XX.X.X-informational?style=flat-square) diff --git a/charts/linkerd/linkerd-control-plane/templates/proxy-injector-rbac.yaml b/charts/linkerd/linkerd-control-plane/templates/proxy-injector-rbac.yaml index abf4edf5d..c2c84c5c1 100644 --- a/charts/linkerd/linkerd-control-plane/templates/proxy-injector-rbac.yaml +++ b/charts/linkerd/linkerd-control-plane/templates/proxy-injector-rbac.yaml @@ -115,5 +115,6 @@ webhooks: apiGroups: [""] apiVersions: ["v1"] resources: ["pods", "services"] + scope: "Namespaced" sideEffects: None timeoutSeconds: {{ .Values.proxyInjector.timeoutSeconds | default 10 }} diff --git a/charts/linkerd/linkerd-control-plane/values.yaml b/charts/linkerd/linkerd-control-plane/values.yaml index ca522a31c..8d395c821 100644 --- a/charts/linkerd/linkerd-control-plane/values.yaml +++ b/charts/linkerd/linkerd-control-plane/values.yaml @@ -22,7 +22,7 @@ controlPlaneTracing: false # -- namespace to send control plane traces to controlPlaneTracingNamespace: linkerd-jaeger # -- control plane version. See Proxy section for proxy version -linkerdVersion: edge-24.3.4 +linkerdVersion: edge-24.3.5 # -- default kubernetes deployment strategy deploymentStrategy: rollingUpdate: diff --git a/charts/linkerd/linkerd-crds/Chart.yaml b/charts/linkerd/linkerd-crds/Chart.yaml index a4d2f4245..a084e8795 100644 --- a/charts/linkerd/linkerd-crds/Chart.yaml +++ b/charts/linkerd/linkerd-crds/Chart.yaml @@ -23,4 +23,4 @@ name: linkerd-crds sources: - https://github.com/linkerd/linkerd2/ type: application -version: 2024.3.4 +version: 2024.3.5 diff --git a/charts/linkerd/linkerd-crds/README.md b/charts/linkerd/linkerd-crds/README.md index 1bfa53845..6fd820b4f 100644 --- a/charts/linkerd/linkerd-crds/README.md +++ b/charts/linkerd/linkerd-crds/README.md @@ -3,7 +3,7 @@ Linkerd gives you observability, reliability, and security for your microservices — with no code change required. 
-![Version: 2024.3.4](https://img.shields.io/badge/Version-2024.3.4-informational?style=flat-square) +![Version: 2024.3.5](https://img.shields.io/badge/Version-2024.3.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) **Homepage:** diff --git a/charts/linkerd/linkerd-crds/templates/workload/external-workload.yaml b/charts/linkerd/linkerd-crds/templates/workload/external-workload.yaml index 56cb3bddb..a60aff48b 100644 --- a/charts/linkerd/linkerd-crds/templates/workload/external-workload.yaml +++ b/charts/linkerd/linkerd-crds/templates/workload/external-workload.yaml @@ -163,6 +163,8 @@ spec: - name: v1beta1 served: true storage: true + subresources: + status: {} schema: openAPIV3Schema: description: >- diff --git a/charts/metallb/metallb/Chart.lock b/charts/metallb/metallb/Chart.lock index 630f1f826..8c3e319bd 100644 --- a/charts/metallb/metallb/Chart.lock +++ b/charts/metallb/metallb/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: crds repository: "" - version: 0.14.3 + version: 0.14.4 - name: frr-k8s repository: https://metallb.github.io/frr-k8s - version: 0.0.8 -digest: sha256:175725c494156eecae069340d366284a1503fb2977cbe7df0f196b468599a592 -generated: "2024-01-30T17:45:01.476353104+01:00" + version: 0.0.10 +digest: sha256:b86c6655696e26961e41859b7b88c31797091692d910c7d1baae05af8591c937 +generated: "2024-03-26T16:10:37.59968399+01:00" diff --git a/charts/metallb/metallb/Chart.yaml b/charts/metallb/metallb/Chart.yaml index 6fafa64f1..d496a4e00 100644 --- a/charts/metallb/metallb/Chart.yaml +++ b/charts/metallb/metallb/Chart.yaml @@ -5,16 +5,16 @@ annotations: catalog.cattle.io/namespace: metallb-system catalog.cattle.io/release-name: metallb apiVersion: v2 -appVersion: v0.14.3 +appVersion: v0.14.4 dependencies: - condition: crds.enabled name: crds repository: file://./charts/crds - version: 0.14.3 + version: 0.14.4 - condition: frrk8s.enabled name: frr-k8s repository: file://./charts/frr-k8s - version: 0.0.8 + version: 0.0.10 description: A network load-balancer implementation for Kubernetes using standard routing protocols home: https://metallb.universe.tf @@ -24,4 +24,4 @@ name: metallb sources: - https://github.com/metallb/metallb type: application -version: 0.14.3 +version: 0.14.4 diff --git a/charts/metallb/metallb/README.md b/charts/metallb/metallb/README.md index fd21de582..414014c00 100644 --- a/charts/metallb/metallb/README.md +++ b/charts/metallb/metallb/README.md @@ -17,7 +17,7 @@ Kubernetes: `>= 1.19.0-0` | Repository | Name | Version | |------------|------|---------| | | crds | 0.0.0 | -| https://metallb.github.io/frr-k8s | frr-k8s | 0.0.8 | +| https://metallb.github.io/frr-k8s | frr-k8s | 0.0.10 | ## Values @@ -120,10 +120,11 @@ Kubernetes: `>= 1.19.0-0` | speaker.frr.enabled | bool | `true` | | | speaker.frr.image.pullPolicy | string | `nil` | | | speaker.frr.image.repository | string | `"quay.io/frrouting/frr"` | | -| speaker.frr.image.tag | string | `"8.5.2"` | | +| speaker.frr.image.tag | string | `"9.0.2"` | | | speaker.frr.metricsPort | int | `7473` | | | speaker.frr.resources | object | `{}` | | | speaker.frrMetrics.resources | object | `{}` | | +| speaker.ignoreExcludeLB | bool | `false` | | | speaker.image.pullPolicy | string | `nil` | | | speaker.image.repository | string | `"quay.io/metallb/speaker"` | | | speaker.image.tag | string | `nil` | | diff --git a/charts/metallb/metallb/charts/crds/Chart.yaml b/charts/metallb/metallb/charts/crds/Chart.yaml index 6ee31afc6..ec1c7db8c 
100644 --- a/charts/metallb/metallb/charts/crds/Chart.yaml +++ b/charts/metallb/metallb/charts/crds/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v0.14.3 +appVersion: v0.14.4 description: MetalLB CRDs home: https://metallb.universe.tf icon: https://metallb.universe.tf/images/logo/metallb-white.png @@ -7,4 +7,4 @@ name: crds sources: - https://github.com/metallb/metallb type: application -version: 0.14.3 +version: 0.14.4 diff --git a/charts/metallb/metallb/charts/crds/templates/crds.yaml b/charts/metallb/metallb/charts/crds/templates/crds.yaml index febfc04c8..79497f125 100644 --- a/charts/metallb/metallb/charts/crds/templates/crds.yaml +++ b/charts/metallb/metallb/charts/crds/templates/crds.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: bfdprofiles.metallb.io spec: group: metallb.io @@ -30,13 +29,24 @@ spec: name: v1beta1 schema: openAPIV3Schema: - description: BFDProfile represents the settings of the bfd session that can be optionally associated with a BGP session. + description: |- + BFDProfile represents the settings of the bfd session that can be + optionally associated with a BGP session. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -44,37 +54,57 @@ spec: description: BFDProfileSpec defines the desired state of BFDProfile. properties: detectMultiplier: - description: Configures the detection multiplier to determine packet loss. The remote transmission interval will be multiplied by this value to determine the connection loss detection timer. + description: |- + Configures the detection multiplier to determine + packet loss. The remote transmission interval will be multiplied + by this value to determine the connection loss detection timer. format: int32 maximum: 255 minimum: 2 type: integer echoInterval: - description: Configures the minimal echo receive transmission interval that this system is capable of handling in milliseconds. Defaults to 50ms + description: |- + Configures the minimal echo receive transmission + interval that this system is capable of handling in milliseconds. 
+ Defaults to 50ms format: int32 maximum: 60000 minimum: 10 type: integer echoMode: - description: Enables or disables the echo transmission mode. This mode is disabled by default, and not supported on multi hops setups. + description: |- + Enables or disables the echo transmission mode. + This mode is disabled by default, and not supported on multi + hops setups. type: boolean minimumTtl: - description: 'For multi hop sessions only: configure the minimum expected TTL for an incoming BFD control packet.' + description: |- + For multi hop sessions only: configure the minimum + expected TTL for an incoming BFD control packet. format: int32 maximum: 254 minimum: 1 type: integer passiveMode: - description: 'Mark session as passive: a passive session will not attempt to start the connection and will wait for control packets from peer before it begins replying.' + description: |- + Mark session as passive: a passive session will not + attempt to start the connection and will wait for control packets + from peer before it begins replying. type: boolean receiveInterval: - description: The minimum interval that this system is capable of receiving control packets in milliseconds. Defaults to 300ms. + description: |- + The minimum interval that this system is capable of + receiving control packets in milliseconds. + Defaults to 300ms. format: int32 maximum: 60000 minimum: 10 type: integer transmitInterval: - description: The minimum transmission interval (less jitter) that this system wants to use to send BFD control packets in milliseconds. Defaults to 300ms + description: |- + The minimum transmission interval (less jitter) + that this system wants to use to send BFD control packets in + milliseconds. Defaults to 300ms format: int32 maximum: 60000 minimum: 10 @@ -93,8 +123,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: bgpadvertisements.metallb.io spec: group: metallb.io @@ -122,13 +151,25 @@ spec: name: v1beta1 schema: openAPIV3Schema: - description: BGPAdvertisement allows to advertise the IPs coming from the selected IPAddressPools via BGP, setting the parameters of the BGP Advertisement. + description: |- + BGPAdvertisement allows to advertise the IPs coming + from the selected IPAddressPools via BGP, setting the parameters of the + BGP Advertisement. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. 
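The hunk above reflows the BFDProfile field documentation (detectMultiplier, echoInterval, echoMode, minimumTtl, passiveMode, receiveInterval, transmitInterval) without changing its meaning. For orientation, a minimal illustrative manifest assuming the metallb.io/v1beta1 API declared by this CRD; the values are examples, not recommendations:

```yaml
# Illustrative BFDProfile built from the fields documented above; the profile
# name and namespace are placeholders.
apiVersion: metallb.io/v1beta1
kind: BFDProfile
metadata:
  name: fast-failover
  namespace: metallb-system
spec:
  receiveInterval: 300     # minimum interval (ms) for receiving control packets (default 300ms)
  transmitInterval: 300    # minimum interval (ms) for sending control packets (default 300ms)
  detectMultiplier: 3      # remote transmit interval x this value = loss detection timer (2-255)
  echoMode: false          # disabled by default; not supported on multi-hop setups
  echoInterval: 50         # minimal echo receive interval (ms, default 50ms)
```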
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -147,28 +188,44 @@ spec: format: int32 type: integer communities: - description: The BGP communities to be associated with the announcement. Each item can be a standard community of the form 1234:1234, a large community of the form large:1234:1234:1234 or the name of an alias defined in the Community CRD. + description: |- + The BGP communities to be associated with the announcement. Each item can be a standard community of the + form 1234:1234, a large community of the form large:1234:1234:1234 or the name of an alias defined in the + Community CRD. items: type: string type: array ipAddressPoolSelectors: - description: A selector for the IPAddressPools which would get advertised via this advertisement. If no IPAddressPool is selected by this or by the list, the advertisement is applied to all the IPAddressPools. + description: |- + A selector for the IPAddressPools which would get advertised via this advertisement. + If no IPAddressPool is selected by this or by the list, the advertisement is applied to all the IPAddressPools. items: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -180,7 +237,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -191,27 +251,40 @@ spec: type: string type: array localPref: - description: The BGP LOCAL_PREF attribute which is used by BGP best path algorithm, Path with higher localpref is preferred over one with lower localpref. + description: |- + The BGP LOCAL_PREF attribute which is used by BGP best path algorithm, + Path with higher localpref is preferred over one with lower localpref. format: int32 type: integer nodeSelectors: description: NodeSelectors allows to limit the nodes to announce as next hops for the LoadBalancer IP. When empty, all the nodes having are announced as next hops. items: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -223,13 +296,18 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic type: array peers: - description: Peers limits the bgppeer to advertise the ips of the selected pools to. When empty, the loadbalancer IP is announced to all the BGPPeers configured. 
+ description: |- + Peers limits the bgppeer to advertise the ips of the selected pools to. + When empty, the loadbalancer IP is announced to all the BGPPeers configured. items: type: string type: array @@ -247,8 +325,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: bgppeers.metallb.io spec: conversion: @@ -257,7 +334,7 @@ spec: clientConfig: caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== service: - name: webhook-service + name: metallb-webhook-service namespace: {{ .Release.Namespace }} path: /convert conversionReviewVersions: @@ -290,10 +367,19 @@ spec: description: BGPPeer is the Schema for the peers API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. 
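The BGPAdvertisement schema above covers aggregation length, communities (standard, large, or Community aliases), IPAddressPool selection, localPref, node selectors and a peer allow-list. A sketch of how those fields combine; the pool and peer names are placeholders, and `ipAddressPools` is assumed here as the plain name-list counterpart of the `ipAddressPoolSelectors` field shown above:

```yaml
# Illustrative BGPAdvertisement; all names are placeholders.
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
  name: external-adv
  namespace: metallb-system
spec:
  ipAddressPools:
    - production-pool        # advertise only this pool; omit to cover all pools
  communities:
    - "65535:65282"          # standard 1234:1234 form; a Community alias name also works
  localPref: 100             # higher LOCAL_PREF wins in BGP best-path selection
  peers:
    - edge-router-1          # announce only to this BGPPeer; empty means all peers
```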
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -318,7 +404,9 @@ spec: minimum: 0 type: integer nodeSelectors: - description: Only connect to this peer on nodes that match one of these selectors. + description: |- + Only connect to this peer on nodes that match one of these + selectors. items: properties: matchExpressions: @@ -400,10 +488,19 @@ spec: description: BGPPeer is the Schema for the peers API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -413,6 +510,18 @@ spec: bfdProfile: description: The name of the BFD Profile to be used for the BFD session associated to the BGP session. If not set, the BFD session won't be set up. type: string + connectTime: + description: Requested BGP connect time, controls how long BGP waits between connection attempts to a neighbor. 
+ type: string + x-kubernetes-validations: + - message: connect time should be between 1 seconds to 65535 + rule: duration(self).getSeconds() >= 1 && duration(self).getSeconds() <= 65535 + - message: connect time should contain a whole number of seconds + rule: duration(self).getMilliseconds() % 1000 == 0 + disableMP: + default: false + description: To set if we want to disable MP BGP that will separate IPv4 and IPv6 route exchanges into distinct BGP sessions. + type: boolean ebgpMultiHop: description: To set if the BGPPeer is multi-hops away. Needed for FRR mode only. type: boolean @@ -429,23 +538,36 @@ spec: minimum: 0 type: integer nodeSelectors: - description: Only connect to this peer on nodes that match one of these selectors. + description: |- + Only connect to this peer on nodes that match one of these + selectors. items: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -457,7 +579,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -466,7 +591,11 @@ spec: description: Authentication password for routers enforcing TCP MD5 authenticated sessions type: string passwordSecret: - description: passwordSecret is name of the authentication secret for BGP Peer. 
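The hunk above adds two BGPPeer fields: `connectTime`, guarded by CEL rules requiring a whole number of seconds between 1 and 65535, and `disableMP`, which splits IPv4 and IPv6 exchanges into separate sessions when enabled. A sketch, assuming the v1beta2 version of the bgppeers CRD (the schema this hunk modifies); the ASNs and address are placeholders:

```yaml
# Illustrative BGPPeer exercising the newly added fields; numbers and names are placeholders.
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
  name: edge-router-1
  namespace: metallb-system
spec:
  myASN: 64512
  peerASN: 64513
  peerAddress: 172.30.0.3
  connectTime: 5s      # whole seconds only, 1..65535, per the CEL validations above
  disableMP: false     # default; IPv4 and IPv6 routes stay on one MP-BGP session
```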
the secret must be of type "kubernetes.io/basic-auth", and created in the same namespace as the MetalLB deployment. The password is stored in the secret as the key "password". + description: |- + passwordSecret is name of the authentication secret for BGP Peer. + the secret must be of type "kubernetes.io/basic-auth", and created in the + same namespace as the MetalLB deployment. The password is stored in the + secret as the key "password". properties: name: description: name is unique within a namespace to reference a secret resource. @@ -498,7 +627,9 @@ spec: description: Source address to use when establishing the session. type: string vrf: - description: To set if we want to peer with the BGPPeer using an interface belonging to a host vrf + description: |- + To set if we want to peer with the BGPPeer using an interface belonging to + a host vrf type: string required: - myASN @@ -518,8 +649,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: communities.metallb.io spec: group: metallb.io @@ -533,13 +663,24 @@ spec: - name: v1beta1 schema: openAPIV3Schema: - description: Community is a collection of aliases for communities. Users can define named aliases to be used in the BGPPeer CRD. + description: |- + Community is a collection of aliases for communities. + Users can define named aliases to be used in the BGPPeer CRD. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -553,7 +694,9 @@ spec: description: The name of the alias for the community. type: string value: - description: The BGP community value corresponding to the given name. Can be a standard community of the form 1234:1234 or a large community of the form large:1234:1234:1234. + description: |- + The BGP community value corresponding to the given name. Can be a standard community of the form 1234:1234 + or a large community of the form large:1234:1234:1234. 
type: string type: object type: array @@ -571,8 +714,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: ipaddresspools.metallb.io spec: group: metallb.io @@ -596,13 +738,24 @@ spec: name: v1beta1 schema: openAPIV3Schema: - description: IPAddressPool represents a pool of IP addresses that can be allocated to LoadBalancer services. + description: |- + IPAddressPool represents a pool of IP addresses that can be allocated + to LoadBalancer services. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -610,39 +763,67 @@ spec: description: IPAddressPoolSpec defines the desired state of IPAddressPool. properties: addresses: - description: A list of IP address ranges over which MetalLB has authority. You can list multiple ranges in a single pool, they will all share the same settings. Each range can be either a CIDR prefix, or an explicit start-end range of IPs. + description: |- + A list of IP address ranges over which MetalLB has authority. + You can list multiple ranges in a single pool, they will all share the + same settings. Each range can be either a CIDR prefix, or an explicit + start-end range of IPs. items: type: string type: array autoAssign: default: true - description: AutoAssign flag used to prevent MetallB from automatic allocation for a pool. + description: |- + AutoAssign flag used to prevent MetallB from automatic allocation + for a pool. type: boolean avoidBuggyIPs: default: false - description: AvoidBuggyIPs prevents addresses ending with .0 and .255 to be used by a pool. + description: |- + AvoidBuggyIPs prevents addresses ending with .0 and .255 + to be used by a pool. type: boolean serviceAllocation: - description: AllocateTo makes ip pool allocation to specific namespace and/or service. The controller will use the pool with lowest value of priority in case of multiple matches. A pool with no priority set will be used only if the pools with priority can't be used. If multiple matching IPAddressPools are available it will check for the availability of IPs sorting the matching IPAddressPools by priority, starting from the highest to the lowest. 
If multiple IPAddressPools have the same priority, choice will be random. + description: |- + AllocateTo makes ip pool allocation to specific namespace and/or service. + The controller will use the pool with lowest value of priority in case of + multiple matches. A pool with no priority set will be used only if the + pools with priority can't be used. If multiple matching IPAddressPools are + available it will check for the availability of IPs sorting the matching + IPAddressPools by priority, starting from the highest to the lowest. If + multiple IPAddressPools have the same priority, choice will be random. properties: namespaceSelectors: - description: NamespaceSelectors list of label selectors to select namespace(s) for ip pool, an alternative to using namespace list. + description: |- + NamespaceSelectors list of label selectors to select namespace(s) for ip pool, + an alternative to using namespace list. items: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -654,7 +835,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -668,23 +852,36 @@ spec: description: Priority priority given for ip pool while ip allocation on a service. 
type: integer serviceSelectors: - description: ServiceSelectors list of label selector to select service(s) for which ip pool can be used for ip allocation. + description: |- + ServiceSelectors list of label selector to select service(s) for which ip pool + can be used for ip allocation. items: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -696,7 +893,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -720,8 +920,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: l2advertisements.metallb.io spec: group: metallb.io @@ -749,13 +948,24 @@ spec: name: v1beta1 schema: openAPIV3Schema: - description: L2Advertisement allows to advertise the LoadBalancer IPs provided by the selected pools via L2. + description: |- + L2Advertisement allows to advertise the LoadBalancer IPs provided + by the selected pools via L2. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. 
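The IPAddressPool schema above describes `addresses` (CIDR prefixes or explicit start-end ranges), `autoAssign`, `avoidBuggyIPs` and the `serviceAllocation` block whose priority and namespace/service selectors steer which pool a service draws from. A sketch combining them; ranges, labels and the priority value are placeholders:

```yaml
# Illustrative IPAddressPool with a serviceAllocation block; all values are placeholders.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: production-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.0.2.0/28               # CIDR prefix form
    - 192.0.2.64-192.0.2.80      # explicit start-end range form
  autoAssign: true               # allow automatic allocation from this pool
  avoidBuggyIPs: true            # never hand out .0 or .255 addresses
  serviceAllocation:
    priority: 50                 # the pool with the lowest value wins when several pools match
    namespaceSelectors:
      - matchLabels:
          team: a                # restrict the pool to namespaces carrying this label
    serviceSelectors:
      - matchLabels:
          expose: external       # and to services carrying this label
```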
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -763,28 +973,43 @@ spec: description: L2AdvertisementSpec defines the desired state of L2Advertisement. properties: interfaces: - description: A list of interfaces to announce from. The LB IP will be announced only from these interfaces. If the field is not set, we advertise from all the interfaces on the host. + description: |- + A list of interfaces to announce from. The LB IP will be announced only from these interfaces. + If the field is not set, we advertise from all the interfaces on the host. items: type: string type: array ipAddressPoolSelectors: - description: A selector for the IPAddressPools which would get advertised via this advertisement. If no IPAddressPool is selected by this or by the list, the advertisement is applied to all the IPAddressPools. + description: |- + A selector for the IPAddressPools which would get advertised via this advertisement. + If no IPAddressPool is selected by this or by the list, the advertisement is applied to all the IPAddressPools. items: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -796,7 +1021,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -809,21 +1037,32 @@ spec: nodeSelectors: description: NodeSelectors allows to limit the nodes to announce as next hops for the LoadBalancer IP. When empty, all the nodes having are announced as next hops. items: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -835,7 +1074,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -849,3 +1091,70 @@ spec: storage: true subresources: status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: servicel2statuses.metallb.io +spec: + group: metallb.io + names: + kind: ServiceL2Status + listKind: ServiceL2StatusList + plural: servicel2statuses + singular: servicel2status + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.node + name: Allocated Node + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: ServiceL2Status reveals the actual traffic status of loadbalancer services in layer2 mode. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceL2StatusSpec defines the desired state of ServiceL2Status. + type: object + status: + description: MetalLBServiceL2Status defines the observed state of ServiceL2Status. + properties: + interfaces: + description: Interfaces indicates the interfaces that receive the directed traffic + items: + description: InterfaceInfo defines interface info of layer2 announcement. 
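The L2Advertisement schema rewritten above supports restricting announcements to specific interfaces, pools (by name or selector) and nodes. A sketch; the pool, interface and hostname are placeholders, and `ipAddressPools` is assumed as the name-list companion to the selectors shown:

```yaml
# Illustrative L2Advertisement; all names are placeholders.
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: lan-adv
  namespace: metallb-system
spec:
  ipAddressPools:
    - production-pool        # omit pools/selectors to advertise every pool
  interfaces:
    - eth1                   # announce the LB IP only from this interface
  nodeSelectors:
    - matchLabels:
        kubernetes.io/hostname: worker-1   # limit which nodes may announce
```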
+ properties: + name: + description: Name the name of network interface card + type: string + type: object + type: array + node: + description: Node indicates the node that receives the directed traffic + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/metallb/metallb/charts/frr-k8s/Chart.lock b/charts/metallb/metallb/charts/frr-k8s/Chart.lock index 882f5945b..b3bf374c2 100644 --- a/charts/metallb/metallb/charts/frr-k8s/Chart.lock +++ b/charts/metallb/metallb/charts/frr-k8s/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: crds repository: "" - version: 0.0.8 -digest: sha256:7efb8664deb296dbc6bc1311922b9b9203ec7c7611a07c7014e4aa92320f947b -generated: "2024-01-24T09:35:01.567823358+01:00" + version: 0.0.10 +digest: sha256:b92d4dc6adb2f6a3b4279cd0f2b134b659950bd605bb710cb70f7add611f0f6e +generated: "2024-02-21T14:41:51.446359887+01:00" diff --git a/charts/metallb/metallb/charts/frr-k8s/Chart.yaml b/charts/metallb/metallb/charts/frr-k8s/Chart.yaml index 0b2e8693d..4f4e63b62 100644 --- a/charts/metallb/metallb/charts/frr-k8s/Chart.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/Chart.yaml @@ -1,10 +1,10 @@ apiVersion: v2 -appVersion: v0.0.8 +appVersion: v0.0.10 dependencies: - condition: crds.enabled name: crds repository: "" - version: 0.0.8 + version: 0.0.10 description: A cloud native wrapper of FRR home: https://metallb.universe.tf icon: https://metallb.universe.tf/images/logo/metallb-white.png @@ -13,4 +13,4 @@ name: frr-k8s sources: - https://github.com/metallb/frr-k8s type: application -version: 0.0.8 +version: 0.0.10 diff --git a/charts/metallb/metallb/charts/frr-k8s/README.md b/charts/metallb/metallb/charts/frr-k8s/README.md index fe4018830..89b7fd086 100644 --- a/charts/metallb/metallb/charts/frr-k8s/README.md +++ b/charts/metallb/metallb/charts/frr-k8s/README.md @@ -1,6 +1,6 @@ # frr-k8s -![Version: 0.0.8](https://img.shields.io/badge/Version-0.0.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.0.8](https://img.shields.io/badge/AppVersion-v0.0.8-informational?style=flat-square) +![Version: 0.0.10](https://img.shields.io/badge/Version-0.0.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.0.10](https://img.shields.io/badge/AppVersion-v0.0.10-informational?style=flat-square) A cloud native wrapper of FRR @@ -16,7 +16,7 @@ Kubernetes: `>= 1.19.0-0` | Repository | Name | Version | |------------|------|---------| -| | crds | 0.0.8 | +| | crds | 0.0.10 | ## Values @@ -29,7 +29,7 @@ Kubernetes: `>= 1.19.0-0` | frrk8s.disableCertRotation | bool | `false` | | | frrk8s.frr.image.pullPolicy | string | `nil` | | | frrk8s.frr.image.repository | string | `"quay.io/frrouting/frr"` | | -| frrk8s.frr.image.tag | string | `"8.4.2"` | | +| frrk8s.frr.image.tag | string | `"9.0.2"` | | | frrk8s.frr.metricsBindAddress | string | `"127.0.0.1"` | | | frrk8s.frr.metricsPort | int | `7573` | | | frrk8s.frr.resources | object | `{}` | | diff --git a/charts/metallb/metallb/charts/frr-k8s/charts/crds/Chart.yaml b/charts/metallb/metallb/charts/frr-k8s/charts/crds/Chart.yaml index 5b71b0509..6bf915a50 100644 --- a/charts/metallb/metallb/charts/frr-k8s/charts/crds/Chart.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/charts/crds/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v0.0.8 +appVersion: v0.0.10 description: FRR K8s CRDs 
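The crds.yaml diff above also introduces a brand-new ServiceL2Status CRD whose spec is empty and whose status carries the announcing node and interfaces; it reads as a resource MetalLB populates itself rather than one users author. Purely for orientation, a populated object would presumably look like this sketch (all names are placeholders):

```yaml
# Illustrative ServiceL2Status as the speaker might report it; not user-created.
apiVersion: metallb.io/v1beta1
kind: ServiceL2Status
metadata:
  name: example-service
  namespace: metallb-system
status:
  node: worker-1             # node currently receiving the directed traffic
  interfaces:
    - name: eth1             # interface receiving the directed traffic
```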
home: https://metallb.universe.tf icon: https://metallb.universe.tf/images/logo/metallb-white.png @@ -7,4 +7,4 @@ name: crds sources: - https://github.com/metallb/frr-k8s type: application -version: 0.0.8 +version: 0.0.10 diff --git a/charts/metallb/metallb/charts/frr-k8s/charts/crds/templates/frrk8s.metallb.io_frrconfigurations.yaml b/charts/metallb/metallb/charts/frr-k8s/charts/crds/templates/frrk8s.metallb.io_frrconfigurations.yaml index 18c70f3e6..2978e367c 100644 --- a/charts/metallb/metallb/charts/frr-k8s/charts/crds/templates/frrk8s.metallb.io_frrconfigurations.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/charts/crds/templates/frrk8s.metallb.io_frrconfigurations.yaml @@ -144,6 +144,20 @@ spec: BGP session. If not set, the BFD session won't be set up. type: string + connectTime: + description: Requested BGP connect time, controls + how long BGP waits between connection attempts to + a neighbor. + type: string + x-kubernetes-validations: + - message: connect time should be between 1 seconds + to 65535 + rule: duration(self).getSeconds() >= 1 && duration(self).getSeconds() + <= 65535 + - message: connect time should contain a whole number + of seconds + rule: duration(self).getMilliseconds() % 1000 == + 0 ebgpMultiHop: description: EBGPMultiHop indicates if the BGPPeer is multi-hops away. diff --git a/charts/metallb/metallb/charts/frr-k8s/templates/controller.yaml b/charts/metallb/metallb/charts/frr-k8s/templates/controller.yaml index 2d955da70..76dfcef7d 100644 --- a/charts/metallb/metallb/charts/frr-k8s/templates/controller.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/templates/controller.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: {{ template "frrk8s.fullname" . }}-frr-startup + namespace: {{ .Release.Namespace | quote }} labels: {{- include "frrk8s.labels" . | nindent 4 }} app.kubernetes.io/component: frr-k8s @@ -104,6 +105,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: {{ template "frrk8s.fullname" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "frrk8s.labels" . | nindent 4 }} app.kubernetes.io/component: frr-k8s diff --git a/charts/metallb/metallb/charts/frr-k8s/templates/rbac.yaml b/charts/metallb/metallb/charts/frr-k8s/templates/rbac.yaml index 20460142d..e9beef897 100644 --- a/charts/metallb/metallb/charts/frr-k8s/templates/rbac.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/templates/rbac.yaml @@ -44,12 +44,13 @@ roleRef: subjects: - kind: ServiceAccount name: {{ include "frrk8s.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ .Release.Namespace | quote }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ include "frrk8s.fullname" . }}-controller + namespace: {{ .Release.Namespace | quote }} labels: {{- include "frrk8s.labels" . | nindent 4 }} rules: - apiGroups: [""] @@ -60,7 +61,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ include "frrk8s.fullname" . }}-controller - namespace: {{ .Release.Namespace }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "frrk8s.labels" . 
| nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/charts/metallb/metallb/charts/frr-k8s/templates/service-accounts.yaml b/charts/metallb/metallb/charts/frr-k8s/templates/service-accounts.yaml index 9fb46d156..3c64c5cb2 100644 --- a/charts/metallb/metallb/charts/frr-k8s/templates/service-accounts.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/templates/service-accounts.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "frrk8s.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "frrk8s.labels" . | nindent 4 }} app.kubernetes.io/component: controller diff --git a/charts/metallb/metallb/charts/frr-k8s/templates/webhooks.yaml b/charts/metallb/metallb/charts/frr-k8s/templates/webhooks.yaml index 3fa055bbb..3a4b9cd14 100644 --- a/charts/metallb/metallb/charts/frr-k8s/templates/webhooks.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/templates/webhooks.yaml @@ -2,6 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ template "frrk8s.fullname" . }}-webhook-server + namespace: {{ .Release.Namespace | quote }} labels: {{- include "frrk8s.labels" . | nindent 4 }} app.kubernetes.io/component: frr-k8s-webhook-server @@ -117,11 +118,13 @@ apiVersion: v1 kind: Secret metadata: name: frr-k8s-webhook-server-cert + namespace: {{ .Release.Namespace | quote }} --- apiVersion: v1 kind: Service metadata: name: frr-k8s-webhook-service + namespace: {{ .Release.Namespace | quote }} spec: ports: - port: 443 diff --git a/charts/metallb/metallb/charts/frr-k8s/values.yaml b/charts/metallb/metallb/charts/frr-k8s/values.yaml index 5addc75f8..32d118b9c 100644 --- a/charts/metallb/metallb/charts/frr-k8s/values.yaml +++ b/charts/metallb/metallb/charts/frr-k8s/values.yaml @@ -158,7 +158,7 @@ frrk8s: frr: image: repository: quay.io/frrouting/frr - tag: 8.4.2 + tag: 9.0.2 pullPolicy: metricsBindAddress: 127.0.0.1 metricsPort: 7573 diff --git a/charts/metallb/metallb/templates/controller.yaml b/charts/metallb/metallb/templates/controller.yaml index bb79aeb64..d7d299e29 100644 --- a/charts/metallb/metallb/templates/controller.yaml +++ b/charts/metallb/metallb/templates/controller.yaml @@ -65,7 +65,6 @@ spec: {{- with .Values.controller.logLevel }} - --log-level={{ . }} {{- end }} - - --cert-service-name=metallb-webhook-service {{- if .Values.loadBalancerClass }} - --lb-class={{ .Values.loadBalancerClass }} {{- end }} @@ -186,7 +185,7 @@ spec: - name: cert secret: defaultMode: 420 - secretName: webhook-server-cert + secretName: metallb-webhook-cert {{- if .Values.prometheus.controllerMetricsTLSSecret }} - name: metrics-certs secret: diff --git a/charts/metallb/metallb/templates/exclude-l2-config.yaml b/charts/metallb/metallb/templates/exclude-l2-config.yaml index cacea8f6f..932c2d62a 100644 --- a/charts/metallb/metallb/templates/exclude-l2-config.yaml +++ b/charts/metallb/metallb/templates/exclude-l2-config.yaml @@ -1,9 +1,11 @@ -{{- if .Values.speaker.excludeInterfaces.enabled }} +{{- if and .Values.speaker.enabled .Values.speaker.excludeInterfaces.enabled }} apiVersion: v1 kind: ConfigMap metadata: name: metallb-excludel2 namespace: {{ .Release.Namespace | quote }} + labels: + {{- include "metallb.labels" . 
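A recurring change across the frr-k8s templates above (ConfigMap, DaemonSet, Role, RoleBinding, ServiceAccount, Deployment, Secret, Service) is an explicit, quoted release namespace. The pattern is identical in each resource; a generic sketch with a placeholder kind and name suffix:

```yaml
# Generic sketch of the namespace-pinning pattern added above; the kind and
# the name suffix are placeholders.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "frrk8s.fullname" . }}-example
  namespace: {{ .Release.Namespace | quote }}   # pin the object to the release namespace
  labels:
    {{- include "frrk8s.labels" . | nindent 4 }}
```

Quoting keeps the rendered manifest valid even for namespace names YAML would otherwise reinterpret, and setting the namespace explicitly makes `helm template` output unambiguous outside an install context.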
| nindent 4 }} data: excludel2.yaml: | announcedInterfacesToExclude: diff --git a/charts/metallb/metallb/templates/rbac.yaml b/charts/metallb/metallb/templates/rbac.yaml index 5a7d53e03..914ff82af 100644 --- a/charts/metallb/metallb/templates/rbac.yaml +++ b/charts/metallb/metallb/templates/rbac.yaml @@ -27,7 +27,7 @@ rules: verbs: ["list", "watch"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] - resourceNames: ["addresspools.metallb.io","bfdprofiles.metallb.io","bgpadvertisements.metallb.io", + resourceNames: ["bfdprofiles.metallb.io","bgpadvertisements.metallb.io", "bgppeers.metallb.io","ipaddresspools.metallb.io","l2advertisements.metallb.io","communities.metallb.io"] verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] - apiGroups: ["apiextensions.k8s.io"] @@ -58,6 +58,9 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["create", "patch"] +- apiGroups: ["metallb.io"] + resources: ["servicel2statuses","servicel2statuses/status"] + verbs: ["*"] {{- if .Values.prometheus.secureMetricsPort }} - apiGroups: ["authentication.k8s.io"] resources: ["tokenreviews"] @@ -83,9 +86,6 @@ rules: - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["addresspools"] - verbs: ["get", "list", "watch"] - apiGroups: ["metallb.io"] resources: ["bfdprofiles"] verbs: ["get", "list", "watch"] @@ -133,9 +133,6 @@ rules: - apiGroups: [""] resources: ["secrets"] verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] -- apiGroups: ["metallb.io"] - resources: ["addresspools"] - verbs: ["get", "list", "watch"] - apiGroups: ["metallb.io"] resources: ["ipaddresspools"] verbs: ["get", "list", "watch"] diff --git a/charts/metallb/metallb/templates/speaker.yaml b/charts/metallb/metallb/templates/speaker.yaml index 635aa0a80..ac7ee6298 100644 --- a/charts/metallb/metallb/templates/speaker.yaml +++ b/charts/metallb/metallb/templates/speaker.yaml @@ -165,6 +165,11 @@ spec: {{- toYaml .Values.speaker.securityContext | nindent 8 }} {{- end }} volumes: + {{- if .Values.prometheus.speakerMetricsTLSSecret }} + - name: metrics-certs + secret: + secretName: {{ .Values.prometheus.speakerMetricsTLSSecret }} + {{- end }} {{- if .Values.speaker.memberlist.enabled }} - name: memberlist secret: @@ -189,11 +194,6 @@ spec: emptyDir: {} - name: metrics emptyDir: {} - {{- if .Values.prometheus.speakerMetricsTLSSecret }} - - name: metrics-certs - secret: - secretName: {{ .Values.prometheus.speakerMetricsTLSSecret }} - {{- end }} initContainers: # Copies the initial config files with the right permissions to the shared volume. 
- name: cp-frr-files @@ -244,6 +244,12 @@ spec: {{- if .Values.speaker.wanConfig }} - --ml-wan-config {{- end }} + {{- if .Values.speaker.ignoreExcludeLB}} + - --ignore-exclude-lb + {{- end }} + {{- if .Values.prometheus.secureMetricsPort }} + - --host=localhost + {{- end }} env: - name: METALLB_NODE_NAME valueFrom: @@ -296,6 +302,9 @@ spec: {{- if .Values.speaker.livenessProbe.enabled }} livenessProbe: httpGet: + {{- if .Values.prometheus.secureMetricsPort }} + host: localhost + {{- end }} path: /metrics port: monitoring initialDelaySeconds: {{ .Values.speaker.livenessProbe.initialDelaySeconds }} @@ -307,6 +316,9 @@ spec: {{- if .Values.speaker.readinessProbe.enabled }} readinessProbe: httpGet: + {{- if .Values.prometheus.secureMetricsPort }} + host: localhost + {{- end }} path: /metrics port: monitoring initialDelaySeconds: {{ .Values.speaker.readinessProbe.initialDelaySeconds }} @@ -330,7 +342,7 @@ spec: {{- if or .Values.speaker.frr.enabled .Values.speaker.memberlist.enabled .Values.speaker.excludeInterfaces.enabled }} volumeMounts: {{- if .Values.speaker.memberlist.enabled }} - - name: memberlist + - name: memberlist mountPath: {{ .Values.speaker.memberlist.mlSecretKeyPath }} {{- end }} {{- if .Values.speaker.frr.enabled }} @@ -384,7 +396,10 @@ spec: {{- if .Values.speaker.livenessProbe.enabled }} livenessProbe: httpGet: - path: /livez + {{- if .Values.prometheus.secureMetricsPort }} + host: localhost + {{- end }} + path: livez port: {{ .Values.speaker.frr.metricsPort }} initialDelaySeconds: {{ .Values.speaker.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.speaker.livenessProbe.periodSeconds }} @@ -395,6 +410,9 @@ spec: {{- if .Values.speaker.startupProbe.enabled }} startupProbe: httpGet: + {{- if .Values.prometheus.secureMetricsPort }} + host: localhost + {{- end }} path: /livez port: {{ .Values.speaker.frr.metricsPort }} failureThreshold: {{ .Values.speaker.startupProbe.failureThreshold }} @@ -422,6 +440,9 @@ spec: command: ["/etc/frr_metrics/frr-metrics"] args: - --metrics-port={{ .Values.speaker.frr.metricsPort }} + {{- if .Values.prometheus.secureMetricsPort }} + - --host=localhost + {{- end }} ports: - containerPort: {{ .Values.speaker.frr.metricsPort }} name: monitoring @@ -444,7 +465,7 @@ spec: args: - --logtostderr - --secure-listen-address=:{{ .Values.prometheus.secureMetricsPort }} - - --upstream=http://$(METALLB_HOST):{{ .Values.prometheus.metricsPort }}/ + - --upstream=http://localhost:{{ .Values.prometheus.metricsPort }}/ - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 {{- if .Values.prometheus.speakerMetricsTLSSecret }} - --tls-private-key-file=/etc/metrics/tls.key @@ -453,11 +474,6 @@ spec: ports: - containerPort: {{ .Values.prometheus.secureMetricsPort }} name: metricshttps - env: - - name: METALLB_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP resources: requests: cpu: 10m @@ -479,7 +495,7 @@ spec: - --logtostderr - --secure-listen-address=:{{ .Values.speaker.frr.secureMetricsPort }} - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - - --upstream=http://$(METALLB_HOST):{{ .Values.speaker.frr.metricsPort }}/ + - --upstream=http://localhost:{{ .Values.speaker.frr.metricsPort }}/ {{- if 
.Values.prometheus.speakerMetricsTLSSecret }} - --tls-private-key-file=/etc/metrics/tls.key - --tls-cert-file=/etc/metrics/tls.crt @@ -525,7 +541,7 @@ spec: operator: Exists - key: node-role.kubernetes.io/control-plane effect: NoSchedule - operator: Exists + operator: Exists {{- end }} {{- with .Values.speaker.tolerations }} {{- toYaml . | nindent 6 }} diff --git a/charts/metallb/metallb/templates/webhooks.yaml b/charts/metallb/metallb/templates/webhooks.yaml index 8eb0756e6..e708beea6 100644 --- a/charts/metallb/metallb/templates/webhooks.yaml +++ b/charts/metallb/metallb/templates/webhooks.yaml @@ -144,7 +144,7 @@ spec: apiVersion: v1 kind: Secret metadata: - name: webhook-server-cert + name: metallb-webhook-cert namespace: {{ .Release.Namespace | quote }} labels: {{- include "metallb.labels" . | nindent 4 }} diff --git a/charts/metallb/metallb/values.schema.json b/charts/metallb/metallb/values.schema.json index b6373532f..bc0dd8402 100644 --- a/charts/metallb/metallb/values.schema.json +++ b/charts/metallb/metallb/values.schema.json @@ -299,7 +299,7 @@ }, "required": [ "podMonitor", "prometheusRule" ] }, - "controller": { + "controller": { "allOf": [ { "$ref": "#/definitions/component" }, { "description": "MetalLB Controller", @@ -330,7 +330,7 @@ } ] }, - "speaker": { + "speaker": { "allOf": [ { "$ref": "#/definitions/component" }, { "description": "MetalLB Speaker", @@ -364,6 +364,9 @@ } } }, + "ignoreExcludeLB": { + "type": "boolean" + }, "updateStrategy": { "type": "object", "properties": { diff --git a/charts/metallb/metallb/values.yaml b/charts/metallb/metallb/values.yaml index 50c53cd4d..738f25f8a 100644 --- a/charts/metallb/metallb/values.yaml +++ b/charts/metallb/metallb/values.yaml @@ -268,6 +268,9 @@ speaker: mlSecretKeyPath: "/etc/ml_secret_key" excludeInterfaces: enabled: true + # ignore the exclude-from-external-loadbalancer label + ignoreExcludeLB: false + image: repository: quay.io/metallb/speaker tag: @@ -328,7 +331,7 @@ speaker: enabled: true image: repository: quay.io/frrouting/frr - tag: 8.5.2 + tag: 9.0.2 pullPolicy: metricsPort: 7473 resources: {} diff --git a/charts/new-relic/nri-bundle/Chart.lock b/charts/new-relic/nri-bundle/Chart.lock index d46228dea..f325378d9 100644 --- a/charts/new-relic/nri-bundle/Chart.lock +++ b/charts/new-relic/nri-bundle/Chart.lock @@ -1,36 +1,36 @@ dependencies: - name: newrelic-infrastructure repository: https://newrelic.github.io/nri-kubernetes - version: 3.32.0 + version: 3.33.1 - name: nri-prometheus repository: https://newrelic.github.io/nri-prometheus version: 2.1.17 - name: newrelic-prometheus-agent repository: https://newrelic.github.io/newrelic-prometheus-configurator - version: 1.11.0 + version: 1.12.0 - name: nri-metadata-injection repository: https://newrelic.github.io/k8s-metadata-injection - version: 4.18.2 + version: 4.18.3 - name: newrelic-k8s-metrics-adapter repository: https://newrelic.github.io/newrelic-k8s-metrics-adapter - version: 1.10.1 + version: 1.10.2 - name: kube-state-metrics repository: https://prometheus-community.github.io/helm-charts version: 5.12.1 - name: nri-kube-events repository: https://newrelic.github.io/nri-kube-events - version: 3.9.2 + version: 3.9.3 - name: newrelic-logging repository: https://newrelic.github.io/helm-charts version: 1.21.2 - name: newrelic-pixie repository: https://newrelic.github.io/helm-charts - version: 2.1.3 + version: 2.1.4 - name: pixie-operator-chart repository: https://pixie-operator-charts.storage.googleapis.com version: 0.1.4 - name: newrelic-infra-operator 
repository: https://newrelic.github.io/newrelic-infra-operator version: 2.10.0 -digest: sha256:cfa9040fb965fb13487710c241e8c8dca25727054c6ed51088692d7769eece11 -generated: "2024-03-11T21:57:30.13774149Z" +digest: sha256:dc652de5a270b79377130dd333bfd50284e34541cdfd32d4fbce0268f3c01c99 +generated: "2024-04-01T14:46:40.056684873Z" diff --git a/charts/new-relic/nri-bundle/Chart.yaml b/charts/new-relic/nri-bundle/Chart.yaml index 8d4a77412..ac02b83e9 100644 --- a/charts/new-relic/nri-bundle/Chart.yaml +++ b/charts/new-relic/nri-bundle/Chart.yaml @@ -7,7 +7,7 @@ dependencies: - condition: infrastructure.enabled,newrelic-infrastructure.enabled name: newrelic-infrastructure repository: file://./charts/newrelic-infrastructure - version: 3.32.0 + version: 3.33.1 - condition: prometheus.enabled,nri-prometheus.enabled name: nri-prometheus repository: file://./charts/nri-prometheus @@ -15,15 +15,15 @@ dependencies: - condition: newrelic-prometheus-agent.enabled name: newrelic-prometheus-agent repository: file://./charts/newrelic-prometheus-agent - version: 1.11.0 + version: 1.12.0 - condition: webhook.enabled,nri-metadata-injection.enabled name: nri-metadata-injection repository: file://./charts/nri-metadata-injection - version: 4.18.2 + version: 4.18.3 - condition: metrics-adapter.enabled,newrelic-k8s-metrics-adapter.enabled name: newrelic-k8s-metrics-adapter repository: file://./charts/newrelic-k8s-metrics-adapter - version: 1.10.1 + version: 1.10.2 - condition: ksm.enabled,kube-state-metrics.enabled name: kube-state-metrics repository: file://./charts/kube-state-metrics @@ -31,7 +31,7 @@ dependencies: - condition: kubeEvents.enabled,nri-kube-events.enabled name: nri-kube-events repository: file://./charts/nri-kube-events - version: 3.9.2 + version: 3.9.3 - condition: logging.enabled,newrelic-logging.enabled name: newrelic-logging repository: file://./charts/newrelic-logging @@ -39,7 +39,7 @@ dependencies: - condition: newrelic-pixie.enabled name: newrelic-pixie repository: file://./charts/newrelic-pixie - version: 2.1.3 + version: 2.1.4 - alias: pixie-chart condition: pixie-chart.enabled name: pixie-operator-chart @@ -62,6 +62,8 @@ maintainers: url: https://github.com/juanjjaramillo - name: csongnr url: https://github.com/csongnr +- name: dbudziwojskiNR + url: https://github.com/dbudziwojskiNR name: nri-bundle sources: - https://github.com/newrelic/nri-bundle/ @@ -75,4 +77,4 @@ sources: - https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-logging - https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-pixie - https://github.com/newrelic/newrelic-infra-operator/tree/master/charts/newrelic-infra-operator -version: 5.0.69 +version: 5.0.72 diff --git a/charts/new-relic/nri-bundle/README.md b/charts/new-relic/nri-bundle/README.md index 09679dfcb..f5f20b0f1 100644 --- a/charts/new-relic/nri-bundle/README.md +++ b/charts/new-relic/nri-bundle/README.md @@ -195,3 +195,4 @@ Note, the value table below is automatically generated from `values.yaml` by `he * [juanjjaramillo](https://github.com/juanjjaramillo) * [csongnr](https://github.com/csongnr) +* [dbudziwojskiNR](https://github.com/dbudziwojskiNR) diff --git a/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/Chart.yaml b/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/Chart.yaml index 58131c39b..19b6d8cc2 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/Chart.yaml +++ b/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 
3.27.0 +appVersion: 3.28.1 dependencies: - name: common-library repository: https://helm-charts.newrelic.com @@ -23,4 +23,4 @@ sources: - https://github.com/newrelic/nri-kubernetes/ - https://github.com/newrelic/nri-kubernetes/tree/main/charts/newrelic-infrastructure - https://github.com/newrelic/infrastructure-agent/ -version: 3.32.0 +version: 3.33.1 diff --git a/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/values.yaml b/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/values.yaml index 743cf05b2..1094e7be8 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/values.yaml +++ b/charts/new-relic/nri-bundle/charts/newrelic-infrastructure/values.yaml @@ -23,14 +23,14 @@ images: forwarder: registry: "" repository: newrelic/k8s-events-forwarder - tag: 1.50.0 + tag: 1.51.0 pullPolicy: IfNotPresent # -- Image for the New Relic Infrastructure Agent plus integrations. # @default -- See `values.yaml` agent: registry: "" repository: newrelic/infrastructure-bundle - tag: 3.2.33 + tag: 3.2.36 pullPolicy: IfNotPresent # -- Image for the New Relic Kubernetes integration. # @default -- See `values.yaml` diff --git a/charts/new-relic/nri-bundle/charts/newrelic-k8s-metrics-adapter/Chart.yaml b/charts/new-relic/nri-bundle/charts/newrelic-k8s-metrics-adapter/Chart.yaml index c1039dc13..480a1e603 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-k8s-metrics-adapter/Chart.yaml +++ b/charts/new-relic/nri-bundle/charts/newrelic-k8s-metrics-adapter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.12.1 +appVersion: 0.12.2 dependencies: - name: common-library repository: https://helm-charts.newrelic.com @@ -22,4 +22,4 @@ name: newrelic-k8s-metrics-adapter sources: - https://github.com/newrelic/newrelic-k8s-metrics-adapter - https://github.com/newrelic/newrelic-k8s-metrics-adapter/tree/main/charts/newrelic-k8s-metrics-adapter -version: 1.10.1 +version: 1.10.2 diff --git a/charts/new-relic/nri-bundle/charts/newrelic-pixie/Chart.yaml b/charts/new-relic/nri-bundle/charts/newrelic-pixie/Chart.yaml index ee3b5d215..acd3077d4 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-pixie/Chart.yaml +++ b/charts/new-relic/nri-bundle/charts/newrelic-pixie/Chart.yaml @@ -11,13 +11,8 @@ maintainers: - name: nserrino - name: philkuz - name: htroisi -- name: juanjjaramillo -- name: svetlanabrennan -- name: nrepai -- name: csongnr - name: vuqtran88 -- name: xqi-nr name: newrelic-pixie sources: - https://github.com/newrelic/ -version: 2.1.3 +version: 2.1.4 diff --git a/charts/new-relic/nri-bundle/charts/newrelic-pixie/README.md b/charts/new-relic/nri-bundle/charts/newrelic-pixie/README.md index 949989ea9..228a3676d 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-pixie/README.md +++ b/charts/new-relic/nri-bundle/charts/newrelic-pixie/README.md @@ -23,6 +23,10 @@ IMPORTANT: In order to retrieve the Pixie cluster id from the `pl-cluster-secret | `image.pullSecrets` | Image pull secrets. | `nil` | | `customSecretApiKeyName` | Name of an existing Secret object, not created by this chart, where the Pixie API key is stored. | | | `customSecretApiKeyKey` | Key in the existing Secret object, indicated by `customSecretApiKeyName`, where the Pixie API key is stored. 
| | +| `podLabels` | Labels added to each Job pod | `{}` | +| `podAnnotations` | Annotations added to each Job pod | `{}` | +| `job.annotations` | Annotations added to the `newrelic-pixie` Job resource | `{}` | +| `job.labels` | Labels added to the `newrelic-pixie` Job resource | `{}` | | `nodeSelector` | Node label to use for scheduling. | `{}` | | `tolerations` | List of node taints to tolerate (requires Kubernetes >= 1.6). | `[]` | | `affinity` | Node affinity to use for scheduling. | `{}` | diff --git a/charts/new-relic/nri-bundle/charts/newrelic-pixie/templates/job.yaml b/charts/new-relic/nri-bundle/charts/newrelic-pixie/templates/job.yaml index d70dc9ce1..89b97514f 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-pixie/templates/job.yaml +++ b/charts/new-relic/nri-bundle/charts/newrelic-pixie/templates/job.yaml @@ -4,8 +4,15 @@ kind: Job metadata: name: {{ template "newrelic-pixie.fullname" . }} namespace: {{ template "newrelic-pixie.namespace" . }} - labels: - {{- include "newrelic-pixie.labels" . | nindent 4 }} + labels: + {{- include "newrelic-pixie.labels" . | trim | nindent 4}} + {{- if ((.Values.job).labels) }} + {{- toYaml .Values.job.labels | nindent 4 }} + {{- end }} + {{- if ((.Values.job).annotations) }} + annotations: + {{ toYaml .Values.job.annotations | nindent 4 | trim }} + {{- end }} spec: backoffLimit: 4 ttlSecondsAfterFinished: 600 @@ -14,6 +21,13 @@ spec: labels: app.kubernetes.io/name: {{ template "newrelic-pixie.name" . }} release: {{.Release.Name }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: + {{- toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} spec: {{- if .Values.image.pullSecrets }} imagePullSecrets: diff --git a/charts/new-relic/nri-bundle/charts/newrelic-pixie/values.yaml b/charts/new-relic/nri-bundle/charts/newrelic-pixie/values.yaml index e5f5445c5..4103d54e9 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-pixie/values.yaml +++ b/charts/new-relic/nri-bundle/charts/newrelic-pixie/values.yaml @@ -37,6 +37,17 @@ resources: cpu: 100m memory: 250M +# -- Annotations to add to the pod. +podAnnotations: {} +# -- Additional labels for chart pods +podLabels: {} + +job: + # job.annotations -- Annotations to add to the Job. + annotations: {} + # job.labels -- Labels to add to the Job. + labels: {} + proxy: {} nodeSelector: {} diff --git a/charts/new-relic/nri-bundle/charts/newrelic-prometheus-agent/CHANGELOG.md b/charts/new-relic/nri-bundle/charts/newrelic-prometheus-agent/CHANGELOG.md deleted file mode 100644 index 826f22cec..000000000 --- a/charts/new-relic/nri-bundle/charts/newrelic-prometheus-agent/CHANGELOG.md +++ /dev/null @@ -1,128 +0,0 @@ -# Changelog -All notable changes are documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- -## Unreleased -### enhancement -- Add linux node selector @dbudziwojskiNR [#362](https://github.com/newrelic/newrelic-prometheus-configurator/pull/362) - -## v1.3.0 - 2023-09-15 - -### ⛓️ Dependencies -- Updated newrelic/newrelic-prometheus-configurator to v1.6.0 - [Changelog 🔗](https://github.com/newrelic/newrelic-prometheus-configurator/releases/tag/1.6.0) - -## v1.2.3 - 2023-08-22 - -### ⛓️ Dependencies -- Updated newrelic/newrelic-prometheus-configurator to v1.5.0 - [Changelog 🔗](https://github.com/newrelic/newrelic-prometheus-configurator/releases/tag/1.5.0) - -## v1.2.2 - 2023-06-15 - -### ⛓️ Dependencies -- Updated newrelic/newrelic-prometheus-configurator to v1.4.2 - [Changelog 🔗](https://github.com/newrelic/newrelic-prometheus-configurator/releases/tag/1.4.2) - -## v1.2.1 - 2023-06-06 - -### ⛓️ Dependencies -- Updated newrelic/newrelic-prometheus-configurator to v1.4.1 - [Changelog 🔗](https://github.com/newrelic/newrelic-prometheus-configurator/releases/tag/1.4.1) - -## v1.2.0 - 2023-05-12 - -### ⛓️ Dependencies -- Updated newrelic/newrelic-prometheus-configurator to v1.4.0 - [Changelog 🔗](https://github.com/newrelic/newrelic-prometheus-configurator/releases/tag/1.4.0) - -## v1.1.1 - 2023-03-20 - -### ⛓️ Dependencies -- Updated common-library to v1.1.1 - [Changelog 🔗](https://github.com/newrelic/helm-charts/releases/tag/common-library-1.1.1) - -## v1.1.0 - 2023-01-30 - -### 🚀 Enhancements -- Set `NR_PROM_CHART_VERSION` env var in the configurator statefulset init container. - -### ⛓️ Dependencies -- Upgraded github.com/prometheus/prometheus from 0.37.3 to 0.37.5 - [Changelog 🔗](https://github.com/prometheus/prometheus/releases/tag/0.37.5) - -## v1.0.1 - 2022-11-30 - -### 🐞 Bug fixes -- whenever `config.kubernetes.integrations_filter.enabled: false` we should pass the list of `labels` and `app_values` to the configurator config. - -## v1.0.0 - 2022-11-29 - -### First stable release -- From now on the configuration is considered stable. - -### 🚀 Enhancements -- added `k8s-app` label in `integration_filters`. -- added `kube-dns` label value in `integration_filters` to cover `coreDNS` use-case. -- configurator version bumped to `1.0.0` - -### 🐞 Bug fixes -- chart readme was outdated with respect the new default behaviour of integrations_filters. - -### ⛓️ Dependencies -- Upgraded github.com/prometheus/prometheus from 0.37.2 to 0.37.3 [Changelog](https://github.com/prometheus/prometheus/releases/tag/0.37.3) - -## v0.3.1 - 2022-11-08 - -### 🚀 Enhancements -- the chart is now applying by default a series of relabel configs to fix metric types for Cockroach db service. -- the chart is now scraping by default Cockroach db service as well. - -### 🐞 Bug fixes -- updated appVersion of `quay.io/prometheus/prometheus` from v2.37.1 to v2.37.2 - -## v0.2.1 - 2022-11-03 - -### 🐞 Bug fixes -- `imagePullPolicy` is now correctly applied to the init container as well. - -## v0.2.0 - 2022-11-03 - -### Note, defaults of the chart changed -Now, the chart has two jobs configured and integration filters turned on by default: -- `default` scrapes all targets having `prometheus.io/scrape: true`. By default, `integrations_filter.enabled=true`, unless changed, only targets selected by the integration filters will be scraped. -- `newrelic` scrapes all targets having `newrelic.io/scrape: true`. This is useful to extend the `default` job allowlisting by adding the required annotation on each extra service. - -### 🚀 Enhancements -- `integration filters` option, is now supported and enabled by default. 
- -## v0.1.1 - 2022-10-20 - -### ⛓️ Dependencies -- Updated newrelic/newrelic-prometheus-configurator to v0.1.0 - -## v0.1.0 - 2022-10-17 - -### 🚀 Enhancements -- The chart is now published leveraging the release toolkit. -- The chart release notes from now on will be available in the chart package and in the GitHub release notes. - -## [0.0.6] - 2022-10-11 -### Changed -- Changed the default value for `extra_scrape_configs` and improved the documentation - -## [0.0.5] - 2022-10-06 -### Changed -- `newrelic-prometheus-configurator` image bumped `0.0.1` -> `0.0.2`. - -## [0.0.4] - 2022-09-30 -### Changed -- Rename chart `newrelic-prometheus` -> `newrelic-prometheus-agent`. - -## [0.0.3] - 2022-09-30 -### Changed -- Improve docs on readme and values.yaml. - -## [0.0.2] - 2022-09-21 -### Changed -- Update docs on readme. - -## [0.0.1] - 2022-09-20 -### Added -- First Version of this Chart. - diff --git a/charts/new-relic/nri-bundle/charts/newrelic-prometheus-agent/Chart.yaml b/charts/new-relic/nri-bundle/charts/newrelic-prometheus-agent/Chart.yaml index 8738fe593..30dddeb86 100644 --- a/charts/new-relic/nri-bundle/charts/newrelic-prometheus-agent/Chart.yaml +++ b/charts/new-relic/nri-bundle/charts/newrelic-prometheus-agent/Chart.yaml @@ -1,5 +1,5 @@ annotations: - configuratorVersion: 1.14.0 + configuratorVersion: 1.15.0 apiVersion: v2 appVersion: v2.37.8 dependencies: @@ -19,4 +19,4 @@ maintainers: url: https://github.com/dbudziwojskiNR name: newrelic-prometheus-agent type: application -version: 1.11.0 +version: 1.12.0 diff --git a/charts/new-relic/nri-bundle/charts/nri-kube-events/Chart.yaml b/charts/new-relic/nri-bundle/charts/nri-kube-events/Chart.yaml index 0a0b12f86..e83788a42 100644 --- a/charts/new-relic/nri-bundle/charts/nri-kube-events/Chart.yaml +++ b/charts/new-relic/nri-bundle/charts/nri-kube-events/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 2.9.2 +appVersion: 2.9.3 dependencies: - name: common-library repository: https://helm-charts.newrelic.com @@ -23,4 +23,4 @@ sources: - https://github.com/newrelic/nri-kube-events/ - https://github.com/newrelic/nri-kube-events/tree/main/charts/nri-kube-events - https://github.com/newrelic/infrastructure-agent/ -version: 3.9.2 +version: 3.9.3 diff --git a/charts/new-relic/nri-bundle/charts/nri-kube-events/README.md b/charts/new-relic/nri-bundle/charts/nri-kube-events/README.md index 656deb7e9..e3a2737f5 100644 --- a/charts/new-relic/nri-bundle/charts/nri-kube-events/README.md +++ b/charts/new-relic/nri-bundle/charts/nri-kube-events/README.md @@ -1,6 +1,6 @@ # nri-kube-events -![Version: 3.9.2](https://img.shields.io/badge/Version-3.9.2-informational?style=flat-square) ![AppVersion: 2.9.2](https://img.shields.io/badge/AppVersion-2.9.2-informational?style=flat-square) +![Version: 3.9.3](https://img.shields.io/badge/Version-3.9.3-informational?style=flat-square) ![AppVersion: 2.9.3](https://img.shields.io/badge/AppVersion-2.9.3-informational?style=flat-square) A Helm chart to deploy the New Relic Kube Events router diff --git a/charts/new-relic/nri-bundle/charts/nri-metadata-injection/Chart.yaml b/charts/new-relic/nri-bundle/charts/nri-metadata-injection/Chart.yaml index a46408dcc..1416a8bb1 100644 --- a/charts/new-relic/nri-bundle/charts/nri-metadata-injection/Chart.yaml +++ b/charts/new-relic/nri-bundle/charts/nri-metadata-injection/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.26.2 +appVersion: 1.26.3 dependencies: - name: common-library repository: https://helm-charts.newrelic.com @@ -22,4 +22,4 @@ name: 
nri-metadata-injection sources: - https://github.com/newrelic/k8s-metadata-injection - https://github.com/newrelic/k8s-metadata-injection/tree/master/charts/nri-metadata-injection -version: 4.18.2 +version: 4.18.3 diff --git a/charts/redpanda/redpanda/Chart.lock b/charts/redpanda/redpanda/Chart.lock index d4e101c5d..10b45096b 100644 --- a/charts/redpanda/redpanda/Chart.lock +++ b/charts/redpanda/redpanda/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: https://charts.redpanda.com version: 0.1.10 digest: sha256:9705ddcac0c386a44d8fa28cff078e52e0277f81e70db1c5c772303dcfb2ce69 -generated: "2024-03-22T16:33:22.867183926Z" +generated: "2024-04-03T09:59:43.884775713Z" diff --git a/charts/redpanda/redpanda/Chart.yaml b/charts/redpanda/redpanda/Chart.yaml index 656a529dc..0ec1fc356 100644 --- a/charts/redpanda/redpanda/Chart.yaml +++ b/charts/redpanda/redpanda/Chart.yaml @@ -1,7 +1,7 @@ annotations: artifacthub.io/images: | - name: redpanda - image: docker.redpanda.com/redpandadata/redpanda:v23.3.9 + image: docker.redpanda.com/redpandadata/redpanda:v23.3.10 - name: busybox image: busybox:latest - name: mintel/docker-alpine-bash-curl-jq @@ -17,7 +17,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.21-0' catalog.cattle.io/release-name: redpanda apiVersion: v2 -appVersion: v23.3.9 +appVersion: v23.3.10 dependencies: - condition: console.enabled name: console @@ -37,4 +37,4 @@ name: redpanda sources: - https://github.com/redpanda-data/helm-charts type: application -version: 5.7.35 +version: 5.7.37 diff --git a/charts/redpanda/redpanda/templates/_helpers.go.tpl b/charts/redpanda/redpanda/templates/_helpers.go.tpl new file mode 100644 index 000000000..676032523 --- /dev/null +++ b/charts/redpanda/redpanda/templates/_helpers.go.tpl @@ -0,0 +1,178 @@ +{{- /* Generated from "helpers.go" */ -}} + +{{- define "redpanda.Chart" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- (dict "r" (get (fromJson (include "redpanda.cleanForK8s" (dict "a" (list (replace "+" "_" (printf "%s-%s" $dot.Chart.Name $dot.Chart.Version))) ))) "r")) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.Name" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $tmp_tuple_1 := (get (fromJson (include "_shims.compact" (dict "a" (list (get (fromJson (include "_shims.typetest" (dict "a" (list "string" (index $dot.Values "nameOverride")) ))) "r")) ))) "r") -}} +{{- $ok_2 := $tmp_tuple_1.T2 -}} +{{- $override_1 := $tmp_tuple_1.T1 -}} +{{- if (and $ok_2 (ne $override_1 "")) -}} +{{- (dict "r" (get (fromJson (include "redpanda.cleanForK8s" (dict "a" (list $override_1) ))) "r")) | toJson -}} +{{- break -}} +{{- end -}} +{{- (dict "r" (get (fromJson (include "redpanda.cleanForK8s" (dict "a" (list $dot.Chart.Name) ))) "r")) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.Fullname" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $tmp_tuple_2 := (get (fromJson (include "_shims.compact" (dict "a" (list (get (fromJson (include "_shims.typetest" (dict "a" (list "string" (index $dot.Values "fullnameOverride")) ))) "r")) ))) "r") -}} +{{- $ok_4 := $tmp_tuple_2.T2 -}} +{{- $override_3 := $tmp_tuple_2.T1 -}} +{{- if (and $ok_4 (ne $override_3 "")) -}} +{{- (dict "r" (get (fromJson (include "redpanda.cleanForK8s" (dict "a" (list $override_3) ))) "r")) | toJson -}} +{{- break -}} +{{- end -}} +{{- (dict "r" (get (fromJson (include "redpanda.cleanForK8s" (dict "a" (list (printf "%s" $dot.Release.Name)) ))) "r")) | toJson -}} +{{- break 
-}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.FullLabels" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $values := $dot.Values.AsMap -}} +{{- $labels := (dict ) -}} +{{- if (ne $values.commonLabels nil) -}} +{{- $labels = $values.commonLabels -}} +{{- end -}} +{{- $defaults := (dict "helm.sh/chart" (get (fromJson (include "redpanda.Chart" (dict "a" (list $dot) ))) "r") "app.kubernetes.io/name" (get (fromJson (include "redpanda.Name" (dict "a" (list $dot) ))) "r") "app.kubernetes.io/instance" $dot.Release.Name "app.kubernetes.io/managed-by" $dot.Release.Service "app.kubernetes.io/component" (get (fromJson (include "redpanda.Name" (dict "a" (list $dot) ))) "r") ) -}} +{{- (dict "r" (merge $defaults $labels)) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.ServiceAccountName" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $values := $dot.Values.AsMap -}} +{{- $serviceAccount := $values.serviceAccount -}} +{{- if (and $serviceAccount.create (ne $serviceAccount.name "")) -}} +{{- (dict "r" $serviceAccount.name) | toJson -}} +{{- break -}} +{{- else -}}{{- if $serviceAccount.create -}} +{{- (dict "r" (get (fromJson (include "redpanda.Fullname" (dict "a" (list $dot) ))) "r")) | toJson -}} +{{- break -}} +{{- else -}}{{- if (ne $serviceAccount.name "") -}} +{{- (dict "r" $serviceAccount.name) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- (dict "r" "default") | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.Tag" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $values := $dot.Values.AsMap -}} +{{- $tag := (toString $values.image.tag) -}} +{{- if (eq $tag "") -}} +{{- $tag = $dot.Chart.AppVersion -}} +{{- end -}} +{{- $pattern := "^v(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" -}} +{{- if (not (regexMatch $pattern $tag)) -}} +{{- $_ := (fail "image.tag must start with a 'v' and be a valid semver") -}} +{{- end -}} +{{- (dict "r" $tag) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.ServiceName" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $values := $dot.Values.AsMap -}} +{{- if (and (ne $values.service nil) (ne $values.service.name nil)) -}} +{{- (dict "r" (get (fromJson (include "redpanda.cleanForK8s" (dict "a" (list $values.service.name) ))) "r")) | toJson -}} +{{- break -}} +{{- end -}} +{{- (dict "r" (get (fromJson (include "redpanda.Fullname" (dict "a" (list $dot) ))) "r")) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.InternalDomain" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $values := $dot.Values.AsMap -}} +{{- $service := (get (fromJson (include "redpanda.ServiceName" (dict "a" (list $dot) ))) "r") -}} +{{- $ns := $dot.Release.Namespace -}} +{{- $domain := (trimSuffix "." $values.clusterDomain) -}} +{{- (dict "r" (printf "%s.%s.svc.%s." 
$service $ns $domain)) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.TLSEnabled" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $values := $dot.Values.AsMap -}} +{{- if (and (ne $values.tls.enabled nil) $values.tls.enabled) -}} +{{- (dict "r" true) | toJson -}} +{{- break -}} +{{- end -}} +{{- $listeners := (list "kafka" "admin" "schemaRegistry" "rpc" "http") -}} +{{- range $_, $listener := $listeners -}} +{{- $tlsCert := (dig "listeners" $listener "tls" "cert" false $dot.Values.AsMap) -}} +{{- $tlsEnabled := (dig "listeners" $listener "tls" "enabled" false $dot.Values.AsMap) -}} +{{- if (and (not (empty $tlsEnabled)) (not (empty $tlsCert))) -}} +{{- (dict "r" true) | toJson -}} +{{- break -}} +{{- end -}} +{{- $external := (dig "listeners" $listener "external" false $dot.Values.AsMap) -}} +{{- if (empty $external) -}} +{{- continue -}} +{{- end -}} +{{- $keys := (keys $external) -}} +{{- range $_, $key := $keys -}} +{{- $enabled := (dig "listeners" $listener "external" $key "enabled" false $dot.Values.AsMap) -}} +{{- $tlsCert := (dig "listeners" $listener "external" $key "tls" "cert" false $dot.Values.AsMap) -}} +{{- $tlsEnabled := (dig "listeners" $listener "external" $key "tls" "enabled" false $dot.Values.AsMap) -}} +{{- if (and (and (not (empty $enabled)) (not (empty $tlsCert))) (not (empty $tlsEnabled))) -}} +{{- (dict "r" true) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- (dict "r" false) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.ClientAuthRequired" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $listeners := (list "kafka" "admin" "schemaRegistry" "rpc" "http") -}} +{{- range $_, $listener := $listeners -}} +{{- $required := (dig $listener "tls" "requireClientAuth" false $dot.Values.AsMap) -}} +{{- if (not (empty $required)) -}} +{{- (dict "r" true) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} +{{- (dict "r" false) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "redpanda.cleanForK8s" -}} +{{- $in := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- (dict "r" (trimSuffix "-" (trunc 63 $in))) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + diff --git a/charts/redpanda/redpanda/templates/_helpers.tpl b/charts/redpanda/redpanda/templates/_helpers.tpl index 2cb0745da..77c0ff925 100644 --- a/charts/redpanda/redpanda/templates/_helpers.tpl +++ b/charts/redpanda/redpanda/templates/_helpers.tpl @@ -18,7 +18,7 @@ limitations under the License. Expand the name of the chart. */}} {{- define "redpanda.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- get ((include "redpanda.Name" (dict "a" (list .))) | fromJson) "r" }} {{- end -}} {{/* @@ -26,42 +26,28 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). */}} {{- define "redpanda.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} +{{- get ((include "redpanda.Fullname" (dict "a" (list .))) | fromJson) "r" }} {{- end -}} {{/* Create a default service name */}} {{- define "redpanda.servicename" -}} -{{- if dig "service" "name" false .Values.AsMap -}} -{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{ include "redpanda.fullname" . 
| trunc 63 | trimSuffix "-" -}} -{{- end -}} +{{- get ((include "redpanda.ServiceName" (dict "a" (list .))) | fromJson) "r" }} {{- end -}} {{/* full helm labels + common labels */}} {{- define "full.labels" -}} -{{ $required := dict -"helm.sh/chart" ( include "redpanda.chart" . ) -"app.kubernetes.io/name" ( include "redpanda.name" . ) -"app.kubernetes.io/instance" ( .Release.Name ) -"app.kubernetes.io/managed-by" ( .Release.Service ) -"app.kubernetes.io/component" ( include "redpanda.name" . ) }} -{{- toYaml ( merge $required .Values.commonLabels ) }} +{{- (get ((include "redpanda.FullLabels" (dict "a" (list .))) | fromJson) "r") | toYaml }} {{- end -}} {{/* Create chart name and version as used by the chart label. */}} {{- define "redpanda.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- get ((include "redpanda.Chart" (dict "a" (list .))) | fromJson) "r" }} {{- end }} {{/* @@ -75,36 +61,19 @@ Get the version of redpanda being used as an image Create the name of the service account to use */}} {{- define "redpanda.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "redpanda.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} +{{- get ((include "redpanda.ServiceAccountName" (dict "a" (list .))) | fromJson) "r" }} {{- end }} {{/* Use AppVersion if image.tag is not set */}} {{- define "redpanda.tag" -}} -{{- $tag := default .Chart.AppVersion .Values.image.tag -}} -{{- $matchString := "^v(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" -}} -{{- $match := mustRegexMatch $matchString $tag -}} -{{- if not $match -}} - {{/* - This error message is for end users. This can also occur if - AppVersion doesn't start with a 'v' in Chart.yaml. - */}} - {{ fail "image.tag must start with a 'v' and be valid semver" }} -{{- end -}} -{{- $tag -}} +{{- get ((include "redpanda.Tag" (dict "a" (list .))) | fromJson) "r" }} {{- end -}} {{/* Generate internal fqdn */}} {{- define "redpanda.internal.domain" -}} -{{- $service := include "redpanda.servicename" . -}} -{{- $ns := .Release.Namespace -}} -{{- $domain := .Values.clusterDomain | trimSuffix "." -}} -{{- printf "%s.%s.svc.%s." 
$service $ns $domain -}} +{{- get ((include "redpanda.InternalDomain" (dict "a" (list .))) | fromJson) "r" }} {{- end -}} {{/* ConfigMap variables */}} @@ -168,27 +137,7 @@ Use AppVersion if image.tag is not set {{- end -}} {{- define "tls-enabled" -}} -{{- $tlsenabled := .Values.tls.enabled -}} -{{- if not $tlsenabled -}} - {{- range $listener := .Values.listeners -}} - {{- if and - (dig "tls" "enabled" false $listener) - (not (empty (dig "tls" "cert" "" $listener ))) - -}} - {{- $tlsenabled = true -}} - {{- end -}} - {{- if not $tlsenabled -}} - {{- range $external := $listener.external -}} - {{- if and - (dig "tls" "enabled" false $external) - (not (empty (dig "tls" "cert" "" $external))) - -}} - {{- $tlsenabled = true -}} - {{- end -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- end -}} +{{- $tlsenabled := get ((include "redpanda.TLSEnabled" (dict "a" (list .))) | fromJson) "r" }} {{- toJson (dict "bool" $tlsenabled) -}} {{- end -}} @@ -863,16 +812,8 @@ REDPANDA_SASL_USERNAME REDPANDA_SASL_PASSWORD REDPANDA_SASL_MECHANISM {{/* check if client auth is enabled for any of the listeners */}} {{- define "client-auth-required" -}} - {{- with .Values.listeners -}} - {{- $requireClientAuth := or - .kafka.tls.requireClientAuth - .admin.tls.requireClientAuth - .schemaRegistry.tls.requireClientAuth - .rpc.tls.requireClientAuth - .http.tls.requireClientAuth - -}} - {{- toJson (dict "bool" $requireClientAuth) -}} - {{- end -}} +{{- $requireClientAuth := get ((include "redpanda.ClientAuthRequired" (dict "a" (list .))) | fromJson) "r" }} +{{- toJson (dict "bool" $requireClientAuth) -}} {{- end -}} {{/* secret-ref-or-value diff --git a/charts/redpanda/redpanda/templates/_shims.tpl b/charts/redpanda/redpanda/templates/_shims.tpl new file mode 100644 index 000000000..130e24f6e --- /dev/null +++ b/charts/redpanda/redpanda/templates/_shims.tpl @@ -0,0 +1,34 @@ +{{- /* Generated from "" */ -}} + +{{- define "_shims.typetest" -}} +{{- $type := (index .a 0) -}} +{{- $value := (index .a 1) -}} +{{- dict "r" (list $value (typeIs $type $value)) | toJson -}} +{{- end -}} + +{{- define "_shims.dicttest" -}} +{{- $dict := (index .a 0) -}} +{{- $key := (index .a 1) -}} +{{- if (hasKey $dict $key) -}} +{{- (dict "r" (list (index $dict $key) true)) | toJson -}} +{{- else -}} +{{- (dict "r" (list "" false)) | toJson -}} +{{- end -}} +{{- end -}} + +{{- define "_shims.typeassertion" -}} +{{- $type := (index .a 0) -}} +{{- $value := (index .a 1) -}} +{{- if (not (typeIs $type $value)) -}} +{{- (fail "TODO MAKE THIS A NICE MESSAGE") -}} +{{- end -}} +{{- (dict "r" $value) | toJson -}} +{{- end -}} + +{{- define "_shims.compact" -}} +{{- $out := (dict) -}} +{{- range $i, $e := (index .a 0) }} +{{- $_ := (set $out (printf "T%d" (add1 $i)) $e) -}} +{{- end -}} +{{- (dict "r" $out) | toJson -}} +{{- end -}} diff --git a/charts/redpanda/redpanda/values.yaml b/charts/redpanda/redpanda/values.yaml index fa467605f..af6d621b6 100644 --- a/charts/redpanda/redpanda/values.yaml +++ b/charts/redpanda/redpanda/values.yaml @@ -806,8 +806,8 @@ tuning: # current value is below a certain threshold. This allows Redpanda to make as many # simultaneous IO requests as possible, increasing throughput. # - # When this option is enabled, Helm creates a privileged container. If your security profile does not allow this, - # see the [tuning documentation](https://docs.redpanda.com/docs/deploy/deployment-option/self-hosted/kubernetes/kubernetes-tune-workers/). + # When this option is enabled, Helm creates a privileged container. 
If your security profile does not allow this, you can disable this container by setting `tune_aio_events` to `false`. + # For more details, see the [tuning documentation](https://docs.redpanda.com/docs/deploy/deployment-option/self-hosted/kubernetes/kubernetes-tune-workers/). tune_aio_events: true # # Syncs NTP diff --git a/charts/speedscale/speedscale-operator/Chart.yaml b/charts/speedscale/speedscale-operator/Chart.yaml index 7d73fcf38..b416bc42a 100644 --- a/charts/speedscale/speedscale-operator/Chart.yaml +++ b/charts/speedscale/speedscale-operator/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.17.0-0' catalog.cattle.io/release-name: speedscale-operator apiVersion: v1 -appVersion: 2.1.136 +appVersion: 2.1.186 description: Stress test your APIs with real world scenarios. Collect and replay traffic without scripting. home: https://speedscale.com @@ -24,4 +24,4 @@ maintainers: - email: support@speedscale.com name: Speedscale Support name: speedscale-operator -version: 2.1.12 +version: 2.1.15 diff --git a/charts/speedscale/speedscale-operator/README.md b/charts/speedscale/speedscale-operator/README.md index bcbf750f0..6d60fb8dd 100644 --- a/charts/speedscale/speedscale-operator/README.md +++ b/charts/speedscale/speedscale-operator/README.md @@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. -### Upgrade to 2.1.12 +### Upgrade to 2.1.15 ```bash -kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.12/templates/crds/trafficreplays.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.15/templates/crds/trafficreplays.yaml ``` ### Upgrade to 1.1.0 diff --git a/charts/speedscale/speedscale-operator/app-readme.md b/charts/speedscale/speedscale-operator/app-readme.md index bcbf750f0..6d60fb8dd 100644 --- a/charts/speedscale/speedscale-operator/app-readme.md +++ b/charts/speedscale/speedscale-operator/app-readme.md @@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. 
-### Upgrade to 2.1.12 +### Upgrade to 2.1.15 ```bash -kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.12/templates/crds/trafficreplays.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.15/templates/crds/trafficreplays.yaml ``` ### Upgrade to 1.1.0 diff --git a/charts/speedscale/speedscale-operator/templates/crds/trafficreplays.yaml b/charts/speedscale/speedscale-operator/templates/crds/trafficreplays.yaml index 213747b1b..ca82f5ac5 100644 --- a/charts/speedscale/speedscale-operator/templates/crds/trafficreplays.yaml +++ b/charts/speedscale/speedscale-operator/templates/crds/trafficreplays.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.14.0 creationTimestamp: null name: trafficreplays.speedscale.com spec: @@ -35,14 +35,19 @@ spec: description: TrafficReplay is the Schema for the trafficreplays API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -50,32 +55,40 @@ spec: description: TrafficReplaySpec defines the desired state of TrafficReplay properties: buildTag: - description: BuildTag links a unique tag, build hash, etc. to the - generated traffic replay report. That way you can connect the report - results to the version of the code that was tested. + description: |- + BuildTag links a unique tag, build hash, etc. to the generated + traffic replay report. That way you can connect the report results to the + version of the code that was tested. type: string cleanup: - description: Cleanup is the name of cleanup mode used for this TrafficReplay. + description: |- + Cleanup is the name of cleanup mode used for this + TrafficReplay. enum: - inventory - all - none type: string collectLogs: - description: 'CollectLogs enables or disables log collection from - target workload. Defaults to true. DEPRECATED: use TestReport.ActualConfig.Cluster.CollectLogs' + description: |- + CollectLogs enables or disables log collection from target + workload. Defaults to true. 
+ DEPRECATED: use TestReport.ActualConfig.Cluster.CollectLogs type: boolean configChecksum: - description: ConfigChecksum, managed my the operator, is the SHA1 - checksum of the configuration. + description: |- + ConfigChecksum, managed my the operator, is the SHA1 checksum of the + configuration. type: string customURL: description: CustomURL allows to specify custom URL to the SUT. type: string generatorLowData: - description: GeneratorLowData forces the generator into a high efficiency/low - data output mode. This is ideal for high volume performance tests. - Defaults to false. DEPRECATED + description: |- + GeneratorLowData forces the generator into a high + efficiency/low data output mode. This is ideal for high volume + performance tests. Defaults to false. + DEPRECATED type: boolean mode: description: Mode is the name of replay mode used for this TrafficReplay. @@ -88,21 +101,26 @@ spec: description: Indicates whether a responder-only replay needs a report. type: boolean proxyMode: - description: ProxyMode defines proxy operational mode used with injected - sidecar. DEPRECATED + description: |- + ProxyMode defines proxy operational mode used with injected sidecar. + DEPRECATED type: string responderLowData: - description: ResponderLowData forces the responder into a high efficiency/low - data output mode. This is ideal for high volume performance tests. - Defaults to false. DEPRECATED + description: |- + ResponderLowData forces the responder into a high + efficiency/low data output mode. This is ideal for high volume + performance tests. Defaults to false. + DEPRECATED type: boolean secretRefs: - description: SecretRefs hold the references to the secrets which contain + description: |- + SecretRefs hold the references to the secrets which contain various secrets like (e.g. short-lived JWTs to be used by the generator for authorization with HTTP calls). items: - description: LocalObjectReference contains enough information to - locate the referenced Kubernetes resource object. + description: |- + LocalObjectReference contains enough information to locate the referenced + Kubernetes resource object. properties: name: description: Name of the referent. @@ -112,12 +130,14 @@ spec: type: object type: array sidecar: - description: 'Sidecar defines sidecar specific configuration. DEPRECATED: - use Workloads' + description: |- + Sidecar defines sidecar specific configuration. + DEPRECATED: use Workloads properties: inject: - description: Inject enables or disables sidecar injection during - the replay. Defaults to false. + description: |- + Inject enables or disables sidecar injection during the replay. + Defaults to false. type: boolean patch: description: Patch is .yaml file patch for the Workload @@ -159,107 +179,121 @@ spec: type: string type: object out: - description: Out enables or disables TLS out on the sidecar - during replay. + description: |- + Out enables or disables TLS out on the + sidecar during replay. type: boolean type: object type: object snapshotID: - description: SnapshotID is the id of the traffic snapshot for this + description: |- + SnapshotID is the id of the traffic snapshot for this TrafficReplay. type: string testConfigID: - description: TestConfigID is the id of the replay configuration to - be used by the generator and responder for the TrafficReplay. + description: |- + TestConfigID is the id of the replay configuration to be used + by the generator and responder for the TrafficReplay. 
type: string timeout: - description: Timeout is the time to wait for replay test to finish. - Defaults to value of the `TIMEOUT` setting of the operator. + description: |- + Timeout is the time to wait for replay test to finish. Defaults + to value of the `TIMEOUT` setting of the operator. type: string ttlAfterReady: - description: TTLAfterReady provides a TTL (time to live) mechanism - to limit the lifetime of TrafficReplay object that have finished - the execution and reached its final state (either complete or failed). + description: |- + TTLAfterReady provides a TTL (time to live) mechanism to limit + the lifetime of TrafficReplay object that have finished the execution and + reached its final state (either complete or failed). type: string workloadRef: - description: 'WorkloadRef is the reference to the target workload - (SUT) for TrafficReplay. The operations will be performed in the - namespace of the target object. DEPRECATED: use Workloads' + description: |- + WorkloadRef is the reference to the target workload (SUT) for + TrafficReplay. The operations will be performed in the namespace of the + target object. + DEPRECATED: use Workloads properties: apiVersion: - description: API version of the referent + description: API version of the referenced object. type: string kind: - description: Kind of the referent + description: Kind of the referenced object. Defaults to "Deployment". type: string name: - description: Name of the referent + description: Name of the referenced object. type: string namespace: - description: Namespace of the referent, defaults to the TrafficReplay - namespace + description: Namespace of the referenced object. Defaults to the + TrafficReplay namespace. type: string required: - - kind - name type: object workloads: - description: Workloads define target workloads (SUT) for a TrafficReplay. - Many workloads may be provided, or none. Workloads may be modified - and restarted during replay to configure communication with a responder. + description: |- + Workloads define target workloads (SUT) for a TrafficReplay. Many + workloads may be provided, or none. Workloads may be modified and + restarted during replay to configure communication with a responder. items: - description: Workload represents a Kubernetes workload to be targeted - during replay and associated settings. + description: |- + Workload represents a Kubernetes workload to be targeted during replay and + associated settings. properties: inTrafficKey: description: 'DEPRECATED: use InTrafficKeys' type: string inTrafficKeys: - description: "InTrafficKeys are used to identify slices of inbound - snapshot traffic this workload is targeting and maps directly - to a snapshot's `InTraffic` field. Snapshot ingress traffic - can be split across multiple slices where each slice contains - part of the traffic. A key must only be specified once across - all workloads, but a workload may specify multiple keys. \n - This field is optional in the spec to provide support for - single-workload and legacy replays, but must be specified - for multi-workload replays in order to provide deterministic - replay configuration." + description: 'DEPRECATED: use Tests' + items: + type: string + type: array + mocks: + description: |- + Mocks are strings used to identify slices of outbound snapshot traffic to + mock for this workload and maps directly to a snapshot's `OutTraffic` + field. Snapshot egress traffic can be split across multiple slices where + each slice contains part of the traffic. 
A workload may specify multiple + keys and multiple workloads may specify the same key. + + + Only the traffic slices defined here will be mocked. A workload with no + keys defined will not mock any traffic. Pass '*' to mock all traffic. + + + Mock strings may only match part of the snapshot's `OutTraffic` key if the + string matches exactly one key. For example, the test string + `foo.example.com` would match the `OutTraffic` key of + my-service:foo.example.com:8080, as long as no other keys would match + `foo.example.com`. Multiple mocks must be specified for multiple keys + unless using '*'. items: type: string type: array outTrafficKeys: - description: "OutTrafficKeys are used to identify slices of - outbound snapshot traffic to mock for this workload and maps - directly to a snapshot's `OutTraffic` field. Snapshot egress - traffic can be split across multiple slices where each slice - contains part of the traffic. A workload may specify multiple - keys and multiple workloads may specify the same key. \n Only - the traffic slices defined here will be mocked. A workload - with no keys defined will not mock any traffic. Pass '*' - to mock all traffic." + description: 'DEPRECATED: use Mocks' items: type: string type: array ref: - description: Ref is a reference to a cluster workload, like - a deployment or a statefulset. + description: |- + Ref is a reference to a cluster workload, like a deployment or a + statefulset. properties: apiVersion: - description: API version of the referent + description: API version of the referenced object. type: string kind: - description: Kind of the referent + description: Kind of the referenced object. Defaults to + "Deployment". type: string name: - description: Name of the referent + description: Name of the referenced object. type: string namespace: - description: Namespace of the referent, defaults to the - TrafficReplay namespace + description: Namespace of the referenced object. Defaults + to the TrafficReplay namespace. type: string required: - - kind - name type: object routing: @@ -270,13 +304,14 @@ spec: - nat type: string sidecar: - description: 'TODO: this is not implemented, come back and replace - deprecated Sidecar with workload specific settings Sidecar - defines sidecar specific configuration.' + description: |- + TODO: this is not implemented, come back and replace deprecated Sidecar with workload specific settings + Sidecar defines sidecar specific configuration. properties: inject: - description: Inject enables or disables sidecar injection - during the replay. Defaults to false. + description: |- + Inject enables or disables sidecar injection during the replay. + Defaults to false. type: boolean patch: description: Patch is .yaml file patch for the Workload @@ -319,11 +354,34 @@ spec: type: string type: object out: - description: Out enables or disables TLS out on the + description: |- + Out enables or disables TLS out on the sidecar during replay. type: boolean type: object type: object + tests: + description: |- + Tests are strings used to identify slices of inbound snapshot traffic this + workload is targeting and maps directly to a snapshot's `InTraffic` field. + Snapshot ingress traffic can be split across multiple slices where each + slice contains part of the traffic. A key must only be specified once + across all workloads, but a workload may specify multiple keys. + + + Test strings may only match part of the snapshot's `InTraffic` key if the + string matches exactly one key. 
For example, the test string + `foo.example.com` would match the `InTraffic` key of + my-service:foo.example.com:8080, as long as no other keys would match + `foo.example.com` + + + This field is optional in the spec to provide support for single-workload + and legacy replays, but must be specified for multi-workload replays in + order to provide deterministic replay configuration. + items: + type: string + type: array type: object type: array required: @@ -342,42 +400,42 @@ spec: conditions: items: description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. 
Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -391,11 +449,12 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -445,5 +504,5 @@ status: acceptedNames: kind: "" plural: "" - conditions: [] - storedVersions: [] + conditions: null + storedVersions: null diff --git a/charts/speedscale/speedscale-operator/templates/tls.yaml b/charts/speedscale/speedscale-operator/templates/tls.yaml index 495bc0586..4a2456288 100644 --- a/charts/speedscale/speedscale-operator/templates/tls.yaml +++ b/charts/speedscale/speedscale-operator/templates/tls.yaml @@ -45,9 +45,9 @@ spec: containers: - args: - |- - keytool -importcert -noprompt -cacerts -storepass changeit -alias speedscale -file /etc/ssl/speedscale/tls.crt + keytool -keystore /usr/lib/jvm/jre/lib/security/cacerts -importcert -noprompt -trustcacerts -storepass changeit -alias speedscale -file /etc/ssl/speedscale/tls.crt kubectl -n ${POD_NAMESPACE} delete secret speedscale-jks || true - kubectl -n ${POD_NAMESPACE} create secret generic speedscale-jks --from-file=cacerts.jks=${JAVA_HOME}/lib/security/cacerts + kubectl -n ${POD_NAMESPACE} create secret generic speedscale-jks --from-file=cacerts.jks=/usr/lib/jvm/jre/lib/security/cacerts # in case we're in istio curl -X POST http://127.0.0.1:15000/quitquitquit || true diff --git a/charts/speedscale/speedscale-operator/values.yaml b/charts/speedscale/speedscale-operator/values.yaml index 4cff9245c..97c4fc5d1 100644 --- a/charts/speedscale/speedscale-operator/values.yaml +++ b/charts/speedscale/speedscale-operator/values.yaml @@ -20,7 +20,7 @@ clusterName: "my-cluster" # Speedscale components image settings. image: registry: gcr.io/speedscale - tag: v2.1.136 + tag: v2.1.186 pullPolicy: Always # Log level for Speedscale components. 
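The speedscale-operator bump above (chart 2.1.12 -> 2.1.15, image v2.1.136 -> v2.1.186) pairs the server-side CRD re-apply from the upgrade notes with an ordinary chart upgrade. A minimal sketch of that flow follows; the `speedscale` repo alias, the `speedscale-operator` release name, and the `speedscale` namespace are illustrative assumptions and are not part of this diff.

```bash
# Re-apply the updated TrafficReplay CRD server-side, as in the upgrade notes above.
kubectl apply --server-side -f \
  https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.15/templates/crds/trafficreplays.yaml

# Then move the Helm release to the new chart version (repo alias, release name,
# and namespace are assumed; adjust to your installation).
helm repo update
helm upgrade speedscale-operator speedscale/speedscale-operator \
  --namespace speedscale \
  --version 2.1.15
```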
diff --git a/charts/stackstate/stackstate-k8s-agent/Chart.yaml b/charts/stackstate/stackstate-k8s-agent/Chart.yaml index a7b7f8be6..e4b2151ba 100644 --- a/charts/stackstate/stackstate-k8s-agent/Chart.yaml +++ b/charts/stackstate/stackstate-k8s-agent/Chart.yaml @@ -21,4 +21,4 @@ maintainers: - email: ops@stackstate.com name: Stackstate name: stackstate-k8s-agent -version: 1.0.76 +version: 1.0.78 diff --git a/charts/stackstate/stackstate-k8s-agent/README.md b/charts/stackstate/stackstate-k8s-agent/README.md index 27dbffafd..754d27ac3 100644 --- a/charts/stackstate/stackstate-k8s-agent/README.md +++ b/charts/stackstate/stackstate-k8s-agent/README.md @@ -2,7 +2,7 @@ Helm chart for the StackState Agent. -Current chart version is `1.0.76` +Current chart version is `1.0.78` **Homepage:** @@ -61,7 +61,7 @@ stackstate/stackstate-k8s-agent | checksAgent.enabled | bool | `true` | Enable / disable runnning cluster checks in a separately deployed pod | | checksAgent.image.pullPolicy | string | `"IfNotPresent"` | Default container image pull policy. | | checksAgent.image.repository | string | `"stackstate/stackstate-k8s-agent"` | Base container image repository. | -| checksAgent.image.tag | string | `"3bc9e882"` | Default container image tag. | +| checksAgent.image.tag | string | `"80ded79e"` | Default container image tag. | | checksAgent.livenessProbe.enabled | bool | `true` | Enable use of livenessProbe check. | | checksAgent.livenessProbe.failureThreshold | int | `3` | `failureThreshold` for the liveness probe. | | checksAgent.livenessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the liveness probe. | @@ -126,7 +126,7 @@ stackstate/stackstate-k8s-agent | clusterAgent.enabled | bool | `true` | Enable / disable the cluster agent. | | clusterAgent.image.pullPolicy | string | `"IfNotPresent"` | Default container image pull policy. | | clusterAgent.image.repository | string | `"stackstate/stackstate-k8s-cluster-agent"` | Base container image repository. | -| clusterAgent.image.tag | string | `"3bc9e882"` | Default container image tag. | +| clusterAgent.image.tag | string | `"80ded79e"` | Default container image tag. | | clusterAgent.livenessProbe.enabled | bool | `true` | Enable use of livenessProbe check. | | clusterAgent.livenessProbe.failureThreshold | int | `3` | `failureThreshold` for the liveness probe. | | clusterAgent.livenessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the liveness probe. | @@ -188,7 +188,7 @@ stackstate/stackstate-k8s-agent | nodeAgent.containers.agent.env | object | `{}` | Additional environment variables for the agent container | | nodeAgent.containers.agent.image.pullPolicy | string | `"IfNotPresent"` | Default container image pull policy. | | nodeAgent.containers.agent.image.repository | string | `"stackstate/stackstate-k8s-agent"` | Base container image repository. | -| nodeAgent.containers.agent.image.tag | string | `"3bc9e882"` | Default container image tag. | +| nodeAgent.containers.agent.image.tag | string | `"80ded79e"` | Default container image tag. | | nodeAgent.containers.agent.livenessProbe.enabled | bool | `true` | Enable use of livenessProbe check. | | nodeAgent.containers.agent.livenessProbe.failureThreshold | int | `3` | `failureThreshold` for the liveness probe. | | nodeAgent.containers.agent.livenessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the liveness probe. 
| @@ -212,7 +212,7 @@ stackstate/stackstate-k8s-agent | nodeAgent.containers.processAgent.image.pullPolicy | string | `"IfNotPresent"` | Process-agent container image pull policy. | | nodeAgent.containers.processAgent.image.registry | string | `nil` | | | nodeAgent.containers.processAgent.image.repository | string | `"stackstate/stackstate-k8s-process-agent"` | Process-agent container image repository. | -| nodeAgent.containers.processAgent.image.tag | string | `"2df5d4d6"` | Default process-agent container image tag. | +| nodeAgent.containers.processAgent.image.tag | string | `"ae5d42d2"` | Default process-agent container image tag. | | nodeAgent.containers.processAgent.logLevel | string | `nil` | Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off # If not set, fall back to the value of agent.logLevel. | | nodeAgent.containers.processAgent.procVolumeReadOnly | bool | `true` | Configure whether /host/proc is read only for the process agent container | | nodeAgent.containers.processAgent.resources.limits.cpu | string | `"125m"` | Memory resource limits. | diff --git a/charts/stackstate/stackstate-k8s-agent/values.yaml b/charts/stackstate/stackstate-k8s-agent/values.yaml index a5e1ce39d..3bc6eb4c7 100644 --- a/charts/stackstate/stackstate-k8s-agent/values.yaml +++ b/charts/stackstate/stackstate-k8s-agent/values.yaml @@ -104,7 +104,7 @@ nodeAgent: # nodeAgent.containers.agent.image.repository -- Base container image repository. repository: stackstate/stackstate-k8s-agent # nodeAgent.containers.agent.image.tag -- Default container image tag. - tag: "3bc9e882" + tag: "80ded79e" # nodeAgent.containers.agent.image.pullPolicy -- Default container image pull policy. pullPolicy: IfNotPresent processAgent: @@ -163,7 +163,7 @@ nodeAgent: # nodeAgent.containers.processAgent.image.repository -- Process-agent container image repository. repository: stackstate/stackstate-k8s-process-agent # nodeAgent.containers.processAgent.image.tag -- Default process-agent container image tag. - tag: "2df5d4d6" + tag: "ae5d42d2" # nodeAgent.containers.processAgent.image.pullPolicy -- Process-agent container image pull policy. pullPolicy: IfNotPresent # nodeAgent.containers.processAgent.env -- Additional environment variables for the process-agent container @@ -352,7 +352,7 @@ clusterAgent: # clusterAgent.image.repository -- Base container image repository. repository: stackstate/stackstate-k8s-cluster-agent # clusterAgent.image.tag -- Default container image tag. - tag: "3bc9e882" + tag: "80ded79e" # clusterAgent.image.pullPolicy -- Default container image pull policy. pullPolicy: IfNotPresent @@ -507,7 +507,7 @@ checksAgent: # checksAgent.image.repository -- Base container image repository. repository: stackstate/stackstate-k8s-agent # checksAgent.image.tag -- Default container image tag. - tag: "3bc9e882" + tag: "80ded79e" # checksAgent.image.pullPolicy -- Default container image pull policy. pullPolicy: IfNotPresent diff --git a/charts/yugabyte/yugabyte/Chart.yaml b/charts/yugabyte/yugabyte/Chart.yaml index 2b7054045..d3e798ca6 100644 --- a/charts/yugabyte/yugabyte/Chart.yaml +++ b/charts/yugabyte/yugabyte/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.cattle.io/release-name: yugabyte charts.openshift.io/name: yugabyte apiVersion: v2 -appVersion: 2.18.6.0-b73 +appVersion: 2.18.7.0-b30 description: YugabyteDB is the high-performance distributed SQL database for building global, internet-scale apps. 
home: https://www.yugabyte.com @@ -19,4 +19,4 @@ maintainers: name: yugabyte sources: - https://github.com/yugabyte/yugabyte-db -version: 2.18.6 +version: 2.18.7 diff --git a/charts/yugabyte/yugabyte/app-readme.md b/charts/yugabyte/yugabyte/app-readme.md index edad7f89e..caf278c71 100644 --- a/charts/yugabyte/yugabyte/app-readme.md +++ b/charts/yugabyte/yugabyte/app-readme.md @@ -1 +1 @@ -This chart bootstraps an RF3 YugabyteDB version 2.18.6.0-b73 cluster using the Helm Package Manager. +This chart bootstraps an RF3 YugabyteDB version 2.18.7.0-b30 cluster using the Helm Package Manager. diff --git a/charts/yugabyte/yugabyte/values.yaml b/charts/yugabyte/yugabyte/values.yaml index 8167c76be..3c14874de 100644 --- a/charts/yugabyte/yugabyte/values.yaml +++ b/charts/yugabyte/yugabyte/values.yaml @@ -8,7 +8,7 @@ nameOverride: "" Image: repository: "yugabytedb/yugabyte" - tag: 2.18.6.0-b73 + tag: 2.18.7.0-b30 pullPolicy: IfNotPresent pullSecretName: "" diff --git a/charts/yugabyte/yugaware/Chart.yaml b/charts/yugabyte/yugaware/Chart.yaml index 228eaef2f..1d73d3fca 100644 --- a/charts/yugabyte/yugaware/Chart.yaml +++ b/charts/yugabyte/yugaware/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.cattle.io/release-name: yugaware charts.openshift.io/name: yugaware apiVersion: v2 -appVersion: 2.18.6.0-b73 +appVersion: 2.18.7.0-b30 description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB cluster with multiple pods provided by Kubernetes or OpenShift and logically grouped together @@ -19,4 +19,4 @@ maintainers: - email: gjalla@yugabyte.com name: Govardhan Reddy Jalla name: yugaware -version: 2.18.6 +version: 2.18.7 diff --git a/charts/yugabyte/yugaware/templates/configs.yaml b/charts/yugabyte/yugaware/templates/configs.yaml index 5c67697fc..aa2f3d7c2 100644 --- a/charts/yugabyte/yugaware/templates/configs.yaml +++ b/charts/yugabyte/yugaware/templates/configs.yaml @@ -433,6 +433,20 @@ data: {{- end }} {{- end }} + {{- if .Values.tls.enabled }} + + - job_name: 'platform' + metrics_path: "/api/v1/prometheus_metrics" + scheme: https + tls_config: + insecure_skip_verify: true + static_configs: + - targets: [ + '{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9443' + ] + + {{- else }} + - job_name: 'platform' metrics_path: "/api/v1/prometheus_metrics" static_configs: @@ -440,6 +454,8 @@ data: '{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9000' ] + {{- end }} + - job_name: 'node-agent' metrics_path: "/metrics" file_sd_configs: diff --git a/charts/yugabyte/yugaware/values.yaml b/charts/yugabyte/yugaware/values.yaml index ef7dfb6db..9b874591a 100644 --- a/charts/yugabyte/yugaware/values.yaml +++ b/charts/yugabyte/yugaware/values.yaml @@ -11,7 +11,7 @@ image: # including the yugaware image repository: quay.io/yugabyte/yugaware - tag: 2.18.6.0-b73 + tag: 2.18.7.0-b30 pullPolicy: IfNotPresent pullSecret: yugabyte-k8s-pull-secret ## Docker config JSON File name diff --git a/index.yaml b/index.yaml index 02949bc42..f7d9791bb 100644 --- a/index.yaml +++ b/index.yaml @@ -2868,8 +2868,8 @@ entries: argo-cd: - annotations: artifacthub.io/changes: | - - kind: changed - description: Bump argo-cd to v2.10.4 + - kind: added + description: Add sizeLimit params on EmptyDir Volume artifacthub.io/signKey: | fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 url: https://argoproj.github.io/argo-helm/pgp_keys.asc @@ -2879,8 +2879,8 @@ entries: 
catalog.cattle.io/kube-version: '>=1.23.0-0' catalog.cattle.io/release-name: argo-cd apiVersion: v2 - appVersion: v2.10.4 - created: "2024-03-25T14:58:27.97528249-06:00" + appVersion: v2.10.5 + created: "2024-04-03T10:29:58.18156922-06:00" dependencies: - condition: redis-ha.enabled name: redis-ha @@ -2888,7 +2888,46 @@ entries: version: 4.26.1 description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery tool for Kubernetes. - digest: f8815a3a3f97258a8a44f77ae335f4ab61ec59ad37552a7d9045ff6f505ac83b + digest: 8937dabdf0f2237a1071750a815d986d8b77a48aee69a77c428f6dfeae47ede4 + home: https://github.com/argoproj/argo-helm + icon: https://argo-cd.readthedocs.io/en/stable/assets/logo.png + keywords: + - argoproj + - argocd + - gitops + kubeVersion: '>=1.23.0-0' + maintainers: + - name: argoproj + url: https://argoproj.github.io/ + name: argo-cd + sources: + - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd + - https://github.com/argoproj/argo-cd + urls: + - assets/argo/argo-cd-6.7.8.tgz + version: 6.7.8 + - annotations: + artifacthub.io/changes: | + - kind: changed + description: Bump argo-cd to v2.10.4 + artifacthub.io/signKey: | + fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 + url: https://argoproj.github.io/argo-helm/pgp_keys.asc + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Argo CD + catalog.cattle.io/kube-version: '>=1.23.0-0' + catalog.cattle.io/release-name: argo-cd + apiVersion: v2 + appVersion: v2.10.4 + created: "2024-04-03T10:29:36.757968568-06:00" + dependencies: + - condition: redis-ha.enabled + name: redis-ha + repository: file://./charts/redis-ha + version: 4.26.1 + description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery + tool for Kubernetes. + digest: 1d8957ad4bd4f5f41268c0a3806fb1399b2646164ecb2ef0488588ded90b7be0 home: https://github.com/argoproj/argo-helm icon: https://argo-cd.readthedocs.io/en/stable/assets/logo.png keywords: @@ -6587,6 +6626,39 @@ entries: - assets/argo/argo-cd-5.8.0.tgz version: 5.8.0 artifactory-ha: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: JFrog Artifactory HA + catalog.cattle.io/kube-version: '>= 1.19.0-0' + catalog.cattle.io/release-name: artifactory-ha + apiVersion: v2 + appVersion: 7.77.8 + created: "2024-04-03T10:30:03.164523465-06:00" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: file://./charts/postgresql + version: 10.3.18 + description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. 
+ digest: bdc8c0139833e5db6d94564d9ab159bcd5f04509095cef7976d645db90f17e43 + home: https://www.jfrog.com/artifactory/ + icon: https://raw.githubusercontent.com/jfrog/charts/ea5c3112c24a973f64f3ccd99747323db292a369/stable/artifactory-ha/logo/artifactory-logo.png + keywords: + - artifactory + - jfrog + - devops + kubeVersion: '>= 1.19.0-0' + maintainers: + - email: installers@jfrog.com + name: Chart Maintainers at JFrog + name: artifactory-ha + sources: + - https://github.com/jfrog/charts + type: application + urls: + - assets/jfrog/artifactory-ha-107.77.8.tgz + version: 107.77.8 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: JFrog Artifactory HA @@ -8132,6 +8204,40 @@ entries: - assets/jfrog/artifactory-ha-3.0.1400.tgz version: 3.0.1400 artifactory-jcr: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: JFrog Container Registry + catalog.cattle.io/kube-version: '>= 1.19.0-0' + catalog.cattle.io/release-name: artifactory-jcr + apiVersion: v2 + appVersion: 7.77.8 + created: "2024-04-03T10:30:03.466401112-06:00" + dependencies: + - name: artifactory + repository: file://./charts/artifactory + version: 107.77.8 + description: JFrog Container Registry + digest: b1da53519629a6239594afdbc3cb09cdd5c6247a5799e98e81696e0d416576eb + home: https://jfrog.com/container-registry/ + icon: https://raw.githubusercontent.com/jfrog/charts/ea5c3112c24a973f64f3ccd99747323db292a369/stable/artifactory-jcr/logo/jcr-logo.png + keywords: + - artifactory + - jfrog + - container + - registry + - devops + - jfrog-container-registry + kubeVersion: '>= 1.19.0-0' + maintainers: + - email: helm@jfrog.com + name: Chart Maintainers at JFrog + name: artifactory-jcr + sources: + - https://github.com/jfrog/charts + type: application + urls: + - assets/jfrog/artifactory-jcr-107.77.8.tgz + version: 107.77.8 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: JFrog Container Registry @@ -12269,6 +12375,48 @@ entries: - assets/asserts/asserts-1.6.0.tgz version: 1.6.0 cassandra: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Cassandra + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: cassandra + category: Database + images: | + - name: cassandra + image: docker.io/bitnami/cassandra:4.1.4-debian-12-r5 + - name: cassandra-exporter + image: docker.io/bitnami/cassandra-exporter:2.3.8-debian-12-r18 + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r17 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 4.1.4 + created: "2024-04-03T10:29:59.232303503-06:00" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Cassandra is an open source distributed database management + system designed to handle large amounts of data across many servers, providing + high availability with no single point of failure. + digest: e491c79d19a8745e4eea3c039b90849b11c32a81883973689ae2aac5d9f1430e + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/cassandra-4.svg + keywords: + - cassandra + - database + - nosql + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: cassandra + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/cassandra + urls: + - assets/bitnami/cassandra-11.0.1.tgz + version: 11.0.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Cassandra @@ -15552,6 +15700,27 @@ entries: - assets/cloudcasa/cloudcasa-0.1.000.tgz version: 0.1.000 cockroachdb: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: CockroachDB + catalog.cattle.io/kube-version: '>=1.8-0' + catalog.cattle.io/release-name: cockroachdb + apiVersion: v1 + appVersion: 23.2.3 + created: "2024-04-03T10:30:01.452629038-06:00" + description: CockroachDB is a scalable, survivable, strongly-consistent SQL database. + digest: b40e73cd81e6f05fb4181c677cd5e66c7683de721f926bf30235f290ce309832 + home: https://www.cockroachlabs.com + icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png + maintainers: + - email: helm-charts@cockroachlabs.com + name: cockroachlabs + name: cockroachdb + sources: + - https://github.com/cockroachdb/cockroach + urls: + - assets/cockroach-labs/cockroachdb-12.0.3.tgz + version: 12.0.3 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: CockroachDB @@ -16875,6 +17044,47 @@ entries: - assets/confluent/confluent-for-kubernetes-0.174.2101.tgz version: 0.174.2101 consul: + - annotations: + artifacthub.io/images: | + - name: consul + image: hashicorp/consul:1.18.1 + - name: consul-k8s-control-plane + image: hashicorp/consul-k8s-control-plane:1.4.1 + - name: consul-dataplane + image: hashicorp/consul-dataplane:1.4.1 + - name: envoy + image: envoyproxy/envoy:v1.25.11 + artifacthub.io/license: MPL-2.0 + artifacthub.io/links: | + - name: Documentation + url: https://www.consul.io/docs/k8s + - name: hashicorp/consul + url: https://github.com/hashicorp/consul + - name: hashicorp/consul-k8s + url: https://github.com/hashicorp/consul-k8s + artifacthub.io/prerelease: "false" + artifacthub.io/signKey: | + fingerprint: C874011F0AB405110D02105534365D9472D7468F + url: https://keybase.io/hashicorp/pgp_keys.asc + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Hashicorp Consul + catalog.cattle.io/kube-version: '>=1.22.0-0' + catalog.cattle.io/release-name: consul + apiVersion: v2 + appVersion: 1.18.1 + created: "2024-04-03T10:30:02.66322376-06:00" + description: Official HashiCorp Consul Chart + digest: cf5d0025ff16b582af0e228cbd63b1e19d70bfb5fbf887366a20e86804732c2f + home: https://www.consul.io + icon: https://raw.githubusercontent.com/hashicorp/consul-k8s/main/assets/icon.png + kubeVersion: '>=1.22.0-0' + name: consul + sources: + - https://github.com/hashicorp/consul + - https://github.com/hashicorp/consul-k8s + urls: + - assets/hashicorp/consul-1.4.1.tgz + version: 1.4.1 - annotations: artifacthub.io/images: | - name: consul @@ -17710,11 +17920,29 @@ entries: catalog.cattle.io/featured: "1" catalog.cattle.io/release-name: cost-analyzer apiVersion: v2 - appVersion: 2.1.1 - created: "2024-03-15T00:32:28.020881641Z" + appVersion: 2.2.0 + created: "2024-04-03T10:30:15.855721515-06:00" description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor cloud costs. 
- digest: 2e35cc466afbbd833875a1cafd9961df41b618f7629537578c030f8bccf979f9 + digest: fbb24d7eb0c81d97d01a0d73d93f6ae5cbfc5be08a861a1a58549fc30b7bf19b + icon: https://partner-charts.rancher.io/assets/logos/kubecost.png + name: cost-analyzer + urls: + - assets/kubecost/cost-analyzer-2.2.0.tgz + version: 2.2.0 + - annotations: + artifacthub.io/links: | + - name: Homepage + url: https://www.kubecost.com + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Kubecost + catalog.cattle.io/release-name: cost-analyzer + apiVersion: v2 + appVersion: 2.1.1 + created: "2024-04-03T10:30:04.442154892-06:00" + description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor + cloud costs. + digest: 7489ee85af777d0c82804b6c3e43756ddee074fef37676933e853ffefd0988e2 icon: https://partner-charts.rancher.io/assets/logos/kubecost.png name: cost-analyzer urls: @@ -20680,6 +20908,43 @@ entries: - assets/weka/csi-wekafsplugin-0.6.400.tgz version: 0.6.400 datadog: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Datadog + catalog.cattle.io/kube-version: '>=1.10-0' + catalog.cattle.io/release-name: datadog + apiVersion: v1 + appVersion: "7" + created: "2024-04-03T10:30:02.040536356-06:00" + dependencies: + - condition: clusterAgent.metricsProvider.useDatadogMetrics + name: datadog-crds + repository: https://helm.datadoghq.com + tags: + - install-crds + version: 1.0.1 + - condition: datadog.kubeStateMetricsEnabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 2.13.2 + description: Datadog Agent + digest: c47cb512ba4a8fb8202c20c3588585eed8f0a112631a5f586f1d08e97d3681ea + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-3.59.4.tgz + version: 3.59.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Datadog @@ -23846,6 +24111,39 @@ entries: - assets/datadog/datadog-2.4.200.tgz version: 2.4.200 datadog-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Datadog Operator + catalog.cattle.io/release-name: datadog-operator + apiVersion: v2 + appVersion: 1.5.0 + created: "2024-04-03T10:30:02.122486352-06:00" + dependencies: + - alias: datadogCRDs + condition: installCRDs + name: datadog-crds + repository: file://./charts/datadog-crds + tags: + - install-crds + version: =1.5.0 + description: Datadog Operator + digest: a66d8a8c29999538e3f130be5aac634c5ca374638666c73956ea8e1d8f9074d1 + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog-operator + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-operator-1.6.0.tgz + version: 1.6.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Datadog Operator @@ -24762,6 +25060,33 @@ entries: - assets/dh2i/dxoperator-1.0.1.tgz version: 1.0.1 dynatrace-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: 
Dynatrace Operator + catalog.cattle.io/kube-version: '>=1.19.0-0' + catalog.cattle.io/release-name: dynatrace-operator + apiVersion: v2 + appVersion: 1.0.0 + created: "2024-04-03T10:30:02.185618937-06:00" + description: The Dynatrace Operator Helm chart for Kubernetes and OpenShift + digest: 5596e5ca18536c09cb4e3886ba6ea8a6a9d1e325f45376213e0f6dc2da5ae4ac + home: https://www.dynatrace.com/ + icon: https://assets.dynatrace.com/global/resources/Signet_Logo_RGB_CP_512x512px.png + kubeVersion: '>=1.19.0-0' + maintainers: + - email: marcell.sevcsik@dynatrace.com + name: 0sewa0 + - email: christoph.muellner@dynatrace.com + name: chrismuellner + - email: lukas.hinterreiter@dynatrace.com + name: luhi-DT + name: dynatrace-operator + sources: + - https://github.com/Dynatrace/dynatrace-operator + type: application + urls: + - assets/dynatrace/dynatrace-operator-1.0.0.tgz + version: 1.0.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Dynatrace Operator @@ -25373,6 +25698,30 @@ entries: - assets/elastic/elasticsearch-7.17.3.tgz version: 7.17.3 external-secrets: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: External Secrets Operator + catalog.cattle.io/kube-version: '>= 1.19.0-0' + catalog.cattle.io/release-name: external-secrets + apiVersion: v2 + appVersion: v0.9.14 + created: "2024-04-03T10:30:02.23329035-06:00" + description: External secret management for Kubernetes + digest: be8ad6971fddc62f2f46bdfce6b7b0c6bf0d40188619f7cb52ba61a673488f74 + home: https://github.com/external-secrets/external-secrets + icon: https://raw.githubusercontent.com/external-secrets/external-secrets/main/assets/eso-logo-large.png + keywords: + - kubernetes-external-secrets + - secrets + kubeVersion: '>= 1.19.0-0' + maintainers: + - email: kellinmcavoy@gmail.com + name: mcavoyk + name: external-secrets + type: application + urls: + - assets/external-secrets/external-secrets-0.9.14.tgz + version: 0.9.14 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: External Secrets Operator @@ -32793,6 +33142,63 @@ entries: - assets/jaeger/jaeger-operator-2.36.0.tgz version: 2.36.0 jenkins: + - annotations: + artifacthub.io/category: integration-delivery + artifacthub.io/changes: | + - Fix Prometheus controller name. + artifacthub.io/images: | + - name: jenkins + image: docker.io/jenkins/jenkins:2.440.2-jdk17 + - name: k8s-sidecar + image: docker.io/kiwigrid/k8s-sidecar:1.26.1 + - name: inbound-agent + image: jenkins/inbound-agent:3206.vb_15dcf73f6a_9-3 + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/jenkinsci/helm-charts/tree/main/charts/jenkins + - name: Jenkins + url: https://www.jenkins.io/ + - name: support + url: https://github.com/jenkinsci/helm-charts/issues + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Jenkins + catalog.cattle.io/kube-version: '>=1.14-0' + catalog.cattle.io/release-name: jenkins + apiVersion: v2 + appVersion: 2.440.2 + created: "2024-04-03T10:30:02.888436364-06:00" + description: 'Jenkins - Build great things at any scale! As the leading open source + automation server, Jenkins provides over 1800 plugins to support building, deploying + and automating any project. 
' + digest: a5b8617581eed75e6f01732e48f9e9e23f8abd03b5719978075ee33f467f7656 + home: https://www.jenkins.io/ + icon: https://get.jenkins.io/art/jenkins-logo/logo.svg + keywords: + - jenkins + - ci + - devops + maintainers: + - email: maor.friedman@redhat.com + name: maorfr + - email: mail@torstenwalter.de + name: torstenwalter + - email: garridomota@gmail.com + name: mogaal + - email: wmcdona89@gmail.com + name: wmcdona89 + - email: timjacomb1@gmail.com + name: timja + name: jenkins + sources: + - https://github.com/jenkinsci/jenkins + - https://github.com/jenkinsci/docker-inbound-agent + - https://github.com/maorfr/kube-tasks + - https://github.com/jenkinsci/configuration-as-code-plugin + type: application + urls: + - assets/jenkins/jenkins-5.1.5.tgz + version: 5.1.5 - annotations: artifacthub.io/category: integration-delivery artifacthub.io/changes: | @@ -36246,6 +36652,34 @@ entries: - assets/trilio/k8s-triliovault-operator-v2.0.200.tgz version: v2.0.200 k10: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: K10 + catalog.cattle.io/kube-version: '>= 1.17.0-0' + catalog.cattle.io/release-name: k10 + apiVersion: v2 + appVersion: 6.5.10 + created: "2024-04-03T10:30:03.850626702-06:00" + dependencies: + - condition: grafana.enabled + name: grafana + repository: file://./charts/grafana + version: 7.3.2 + - condition: prometheus.server.enabled + name: prometheus + repository: file://./charts/prometheus + version: 25.12.0 + description: Kasten’s K10 Data Management Platform + digest: 730101e124f6c76a31323bf8b950465c3760931aff80e8e6ebc1defea403ba32 + home: https://kasten.io/ + icon: https://docs.kasten.io/_static/logo-kasten-k10-blue-white.png + maintainers: + - email: contact@kasten.io + name: kastenIO + name: k10 + urls: + - assets/kasten/k10-6.5.1001.tgz + version: 6.5.1001 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: K10 @@ -37411,6 +37845,58 @@ entries: - assets/kasten/k10-4.5.900.tgz version: 4.5.900 kafka: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Kafka + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: kafka + category: Infrastructure + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.20.0-debian-12-r11 + - name: kafka + image: docker.io/bitnami/kafka:3.7.0-debian-12-r0 + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.7.0-debian-12-r19 + - name: kubectl + image: docker.io/bitnami/kubectl:1.29.2-debian-12-r2 + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r16 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.7.0 + created: "2024-04-03T10:29:59.649010191-06:00" + dependencies: + - condition: zookeeper.enabled + name: zookeeper + repository: file://./charts/zookeeper + version: 13.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Kafka is a distributed streaming platform designed to build + real-time pipelines and can be used as a message broker or as a replacement + for a log aggregation solution for big data applications. + digest: b2e5b60191ba4e53781c3e2d6182bf78b70fd187347cf1db13819addb072faaa + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/kafka.svg + keywords: + - kafka + - zookeeper + - streaming + - producer + - consumer + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: kafka + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/kafka + urls: + - assets/bitnami/kafka-28.0.1.tgz + version: 28.0.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Kafka @@ -43447,6 +43933,33 @@ entries: - assets/avesha/kubeslice-worker-0.4.5.tgz version: 0.4.5 kuma: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Kuma + catalog.cattle.io/namespace: kuma-system + catalog.cattle.io/release-name: kuma + apiVersion: v2 + appVersion: 2.6.4 + created: "2024-04-03T10:30:15.912147864-06:00" + description: A Helm chart for the Kuma Control Plane + digest: 61490a9a9f80110db1672355deaf7f380c00e9e19afd0328f013f498f2afd423 + home: https://github.com/kumahq/kuma + icon: https://kuma.io/assets/images/brand/kuma-logo-new.svg + keywords: + - service mesh + - control plane + maintainers: + - email: austin.cawley@gmail.com + name: austince + - email: jakub.dyszkiewicz@konghq.com + name: jakubdyszkiewicz + - email: nikolay.nikolaev@konghq.com + name: nickolaev + name: kuma + type: application + urls: + - assets/kuma/kuma-2.6.4.tgz + version: 2.6.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Kuma @@ -44078,15 +44591,47 @@ entries: catalog.cattle.io/kube-version: '>=1.22.0-0' catalog.cattle.io/release-name: linkerd-control-plane apiVersion: v2 - appVersion: edge-24.3.4 - created: "2024-03-25T14:59:07.296582648-06:00" + appVersion: edge-24.3.5 + created: "2024-04-03T10:30:29.925844916-06:00" dependencies: - name: partials repository: file://./charts/partials version: 0.1.0 description: 'Linkerd gives you observability, reliability, and security for your microservices — with no code change required. ' - digest: 15cef9e1f22b15540dfa4aaafef2c315ebfccf236c9f441c1c0179ff78ed429f + digest: 29cba45fb8ba8d2433c2efbe27bdb5a87177a53ad4f242fd3777fd830038c749 + home: https://linkerd.io + icon: https://linkerd.io/images/logo-only-200h.png + keywords: + - service-mesh + kubeVersion: '>=1.22.0-0' + maintainers: + - email: cncf-linkerd-dev@lists.cncf.io + name: Linkerd authors + url: https://linkerd.io/ + name: linkerd-control-plane + sources: + - https://github.com/linkerd/linkerd2/ + type: application + urls: + - assets/linkerd/linkerd-control-plane-2024.3.5.tgz + version: 2024.3.5 + - annotations: + catalog.cattle.io/auto-install: linkerd-crds + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Linkerd Control Plane + catalog.cattle.io/kube-version: '>=1.22.0-0' + catalog.cattle.io/release-name: linkerd-control-plane + apiVersion: v2 + appVersion: edge-24.3.4 + created: "2024-04-03T10:30:15.930416504-06:00" + dependencies: + - name: partials + repository: file://./charts/partials + version: 0.1.0 + description: 'Linkerd gives you observability, reliability, and security for your + microservices — with no code change required. 
' + digest: 24655ccae813723ec6a64db9f7959fe80b84cab469f01433b726432aae7f7c49 home: https://linkerd.io icon: https://linkerd.io/images/logo-only-200h.png keywords: @@ -44477,6 +45022,36 @@ entries: - assets/linkerd/linkerd-control-plane-1.12.5.tgz version: 1.12.5 linkerd-crds: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Linkerd CRDs + catalog.cattle.io/kube-version: '>=1.22.0-0' + catalog.cattle.io/release-name: linkerd-crds + apiVersion: v2 + created: "2024-04-03T10:30:29.933562768-06:00" + dependencies: + - name: partials + repository: file://./charts/partials + version: 0.1.0 + description: 'Linkerd gives you observability, reliability, and security for your + microservices — with no code change required. ' + digest: fb6436d27250ef663df396a4c72a6e6cf50e1f19353d964ea9eb110e63167513 + home: https://linkerd.io + icon: https://linkerd.io/images/logo-only-200h.png + keywords: + - service-mesh + kubeVersion: '>=1.22.0-0' + maintainers: + - email: cncf-linkerd-dev@lists.cncf.io + name: Linkerd authors + url: https://linkerd.io/ + name: linkerd-crds + sources: + - https://github.com/linkerd/linkerd2/ + type: application + urls: + - assets/linkerd/linkerd-crds-2024.3.5.tgz + version: 2024.3.5 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Linkerd CRDs @@ -45247,6 +45822,50 @@ entries: - assets/elastic/logstash-7.17.3.tgz version: 7.17.3 mariadb: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: MariaDB + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: mariadb + category: Database + images: | + - name: mariadb + image: docker.io/bitnami/mariadb:11.3.2-debian-12-r1 + - name: mysqld-exporter + image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r10 + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r18 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 11.3.2 + created: "2024-04-03T10:29:59.772597777-06:00" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: MariaDB is an open source, community-developed SQL database server + that is widely in use around the world due to its enterprise features, flexibility, + and collaboration with leading tech firms. + digest: 704cc945532c0d6c9dc50b8d57dfc6516c06457c7161d048edcf92b4fd2e96bd + home: https://bitnami.com + icon: https://mariadb.com/wp-content/uploads/2019/11/mariadb-logo-vert_black-transparent.png + keywords: + - mariadb + - mysql + - database + - sql + - prometheus + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: mariadb + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/mariadb + urls: + - assets/bitnami/mariadb-18.0.1.tgz + version: 18.0.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: MariaDB @@ -47281,6 +47900,37 @@ entries: - assets/bitnami/mariadb-11.3.3.tgz version: 11.3.3 metallb: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: MetalLB + catalog.cattle.io/kube-version: '>= 1.19.0-0' + catalog.cattle.io/namespace: metallb-system + catalog.cattle.io/release-name: metallb + apiVersion: v2 + appVersion: v0.14.4 + created: "2024-04-03T10:30:29.963141386-06:00" + dependencies: + - condition: crds.enabled + name: crds + repository: file://./charts/crds + version: 0.14.4 + - condition: frrk8s.enabled + name: frr-k8s + repository: file://./charts/frr-k8s + version: 0.0.10 + description: A network load-balancer implementation for Kubernetes using standard + routing protocols + digest: 43b4e3fac14281b840f7b4fa1cdd90957a56eb9f28ce9522e1c753ad98ea5b65 + home: https://metallb.universe.tf + icon: https://metallb.universe.tf/images/logo/metallb-blue.png + kubeVersion: '>= 1.19.0-0' + name: metallb + sources: + - https://github.com/metallb/metallb + type: application + urls: + - assets/metallb/metallb-0.14.4.tgz + version: 0.14.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: MetalLB @@ -47967,6 +48617,50 @@ entries: - assets/minio/minio-operator-4.4.1700.tgz version: 4.4.1700 mysql: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: MySQL + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: mysql + category: Database + images: | + - name: mysql + image: docker.io/bitnami/mysql:8.0.36-debian-12-r10 + - name: mysqld-exporter + image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r10 + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r18 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 8.0.36 + created: "2024-04-03T10:29:59.778257901-06:00" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: MySQL is a fast, reliable, scalable, and easy to use open source + relational database system. Designed to handle mission-critical, heavy-load + production applications. + digest: ae73ec0da801a78c3f6def6bbc714414c1f842714c6a42d95cccb1326d2f83d7 + home: https://bitnami.com + icon: https://www.mysql.com/common/logos/logo-mysql-170x115.png + keywords: + - mysql + - database + - sql + - cluster + - high availability + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: mysql + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/mysql + urls: + - assets/bitnami/mysql-10.1.1.tgz + version: 10.1.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: MySQL @@ -50555,6 +51249,32 @@ entries: - assets/nats/nats-0.10.0.tgz version: 0.10.0 nginx-ingress: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: NGINX Ingress Controller + catalog.cattle.io/kube-version: '>= 1.23.0-0' + catalog.cattle.io/release-name: nginx-ingress + apiVersion: v2 + appVersion: 3.5.0 + created: "2024-04-03T10:30:02.291336286-06:00" + description: NGINX Ingress Controller + digest: 9195fa096a291f59a0da73ac71ca0ba6b867a8a17bcc5035613e8da815911213 + home: https://github.com/nginxinc/kubernetes-ingress + icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.4.3/charts/nginx-ingress/chart-icon.png + keywords: + - ingress + - nginx + kubeVersion: '>= 1.23.0-0' + maintainers: + - email: kubernetes@nginx.com + name: nginxinc + name: nginx-ingress + sources: + - https://github.com/nginxinc/kubernetes-ingress/tree/v3.4.3/charts/nginx-ingress + type: application + urls: + - assets/f5/nginx-ingress-1.2.0.tgz + version: 1.2.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: NGINX Ingress Controller @@ -51214,6 +51934,90 @@ entries: - assets/f5/nginx-service-mesh-0.2.100.tgz version: 0.2.100 nri-bundle: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: New Relic + catalog.cattle.io/release-name: nri-bundle + apiVersion: v2 + created: "2024-04-03T10:30:30.478265619-06:00" + dependencies: + - condition: infrastructure.enabled,newrelic-infrastructure.enabled + name: newrelic-infrastructure + repository: file://./charts/newrelic-infrastructure + version: 3.33.1 + - condition: prometheus.enabled,nri-prometheus.enabled + name: nri-prometheus + repository: file://./charts/nri-prometheus + version: 2.1.17 + - condition: newrelic-prometheus-agent.enabled + name: newrelic-prometheus-agent + repository: file://./charts/newrelic-prometheus-agent + version: 1.12.0 + - condition: webhook.enabled,nri-metadata-injection.enabled + name: nri-metadata-injection + repository: file://./charts/nri-metadata-injection + version: 4.18.3 + - condition: metrics-adapter.enabled,newrelic-k8s-metrics-adapter.enabled + name: newrelic-k8s-metrics-adapter + repository: file://./charts/newrelic-k8s-metrics-adapter + version: 1.10.2 + - condition: ksm.enabled,kube-state-metrics.enabled + name: kube-state-metrics + repository: file://./charts/kube-state-metrics + version: 5.12.1 + - condition: kubeEvents.enabled,nri-kube-events.enabled + name: nri-kube-events + repository: file://./charts/nri-kube-events + version: 3.9.3 + - condition: logging.enabled,newrelic-logging.enabled + name: newrelic-logging + repository: file://./charts/newrelic-logging + version: 1.21.2 + - condition: newrelic-pixie.enabled + name: newrelic-pixie + repository: file://./charts/newrelic-pixie + version: 2.1.4 + - alias: pixie-chart + condition: pixie-chart.enabled + name: pixie-operator-chart + repository: file://./charts/pixie-operator-chart + version: 0.1.4 + - condition: newrelic-infra-operator.enabled + name: newrelic-infra-operator + repository: file://./charts/newrelic-infra-operator + version: 2.10.0 + description: Groups together the individual charts for the New Relic Kubernetes + solution for a more comfortable deployment. 
+ digest: 6172e3c929b051b4a9e49d18a55f26a3e7b8b384bf8c6224d729c5ebe9201229 + home: https://github.com/newrelic/helm-charts + icon: https://newrelic.com/themes/custom/erno/assets/mediakit/new_relic_logo_vertical.svg + keywords: + - infrastructure + - newrelic + - monitoring + maintainers: + - name: juanjjaramillo + url: https://github.com/juanjjaramillo + - name: csongnr + url: https://github.com/csongnr + - name: dbudziwojskiNR + url: https://github.com/dbudziwojskiNR + name: nri-bundle + sources: + - https://github.com/newrelic/nri-bundle/ + - https://github.com/newrelic/nri-bundle/tree/master/charts/nri-bundle + - https://github.com/newrelic/nri-kubernetes/tree/master/charts/newrelic-infrastructure + - https://github.com/newrelic/nri-prometheus/tree/master/charts/nri-prometheus + - https://github.com/newrelic/newrelic-prometheus-configurator/tree/master/charts/newrelic-prometheus-agent + - https://github.com/newrelic/k8s-metadata-injection/tree/master/charts/nri-metadata-injection + - https://github.com/newrelic/newrelic-k8s-metrics-adapter/tree/master/charts/newrelic-k8s-metrics-adapter + - https://github.com/newrelic/nri-kube-events/tree/master/charts/nri-kube-events + - https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-logging + - https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-pixie + - https://github.com/newrelic/newrelic-infra-operator/tree/master/charts/newrelic-infra-operator + urls: + - assets/new-relic/nri-bundle-5.0.72.tgz + version: 5.0.72 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: New Relic @@ -56089,6 +56893,25 @@ entries: - assets/pixie/pixie-operator-chart-0.0.2501.tgz version: 0.0.2501 polaris: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Fairwinds Polaris + catalog.cattle.io/kube-version: '>= 1.22.0-0' + catalog.cattle.io/release-name: polaris + apiVersion: v1 + appVersion: "8.5" + created: "2024-04-03T10:30:02.309364675-06:00" + description: Validation of best practices in your Kubernetes clusters + digest: 35d6611b768b0950728d3e35c09ef78cc053acd7c1dd35ee0af2e3972638ce24 + icon: https://polaris.docs.fairwinds.com/img/polaris-logo.png + kubeVersion: '>= 1.22.0-0' + maintainers: + - email: robertb@fairwinds.com + name: rbren + name: polaris + urls: + - assets/fairwinds/polaris-5.17.1.tgz + version: 5.17.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Fairwinds Polaris @@ -56318,6 +57141,51 @@ entries: - assets/portshift-operator/portshift-operator-0.1.000.tgz version: 0.1.000 postgresql: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: PostgreSQL + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: postgresql + category: Database + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r18 + - name: postgres-exporter + image: docker.io/bitnami/postgres-exporter:0.15.0-debian-12-r15 + - name: postgresql + image: docker.io/bitnami/postgresql:16.2.0-debian-12-r12 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 16.2.0 + created: "2024-04-03T10:30:00.099546873-06:00" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: PostgreSQL (Postgres) is an open source object-relational database + known for reliability and data integrity. ACID-compliant, it supports foreign + keys, joins, views, triggers and stored procedures. 
+ digest: 7c4a1b0df870086b2192e221a23c83d2178c7b5b5d8e58fcd914782b34be34f4 + home: https://bitnami.com + icon: https://wiki.postgresql.org/images/a/a4/PostgreSQL_logo.3colors.svg + keywords: + - postgresql + - postgres + - database + - sql + - replication + - cluster + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: postgresql + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/postgresql + urls: + - assets/bitnami/postgresql-15.2.2.tgz + version: 15.2.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: PostgreSQL @@ -60500,6 +61368,52 @@ entries: - assets/quobyte/quobyte-cluster-0.1.5.tgz version: 0.1.5 redis: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redis + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: redis + category: Database + images: | + - name: kubectl + image: docker.io/bitnami/kubectl:1.29.2-debian-12-r3 + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r16 + - name: redis + image: docker.io/bitnami/redis:7.2.4-debian-12-r9 + - name: redis-exporter + image: docker.io/bitnami/redis-exporter:1.58.0-debian-12-r4 + - name: redis-sentinel + image: docker.io/bitnami/redis-sentinel:7.2.4-debian-12-r7 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 7.2.4 + created: "2024-04-03T10:30:00.36108115-06:00" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Redis(R) is an open source, advanced key-value store. It is often + referred to as a data structure server since keys can contain strings, hashes, + lists, sets and sorted sets. + digest: 1de68b076b5e06da5622104d73168e14989d2a0eca9b589e344985fae73e95a5 + home: https://bitnami.com + icon: https://redis.com/wp-content/uploads/2021/08/redis-logo.png + keywords: + - redis + - keyvalue + - database + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: redis + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/redis + urls: + - assets/bitnami/redis-19.0.2.tgz + version: 19.0.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Redis @@ -63176,6 +64090,50 @@ entries: - assets/bitnami/redis-17.3.7.tgz version: 17.3.7 redpanda: + - annotations: + artifacthub.io/images: | + - name: redpanda + image: docker.redpanda.com/redpandadata/redpanda:v23.3.10 + - name: busybox + image: busybox:latest + - name: mintel/docker-alpine-bash-curl-jq + image: mintel/docker-alpine-bash-curl-jq:latest + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Documentation + url: https://docs.redpanda.com + - name: "Helm (>= 3.8.0)" + url: https://helm.sh/docs/intro/install/ + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redpanda + catalog.cattle.io/kube-version: '>=1.21-0' + catalog.cattle.io/release-name: redpanda + apiVersion: v2 + appVersion: v23.3.10 + created: "2024-04-03T10:30:31.121563105-06:00" + dependencies: + - condition: console.enabled + name: console + repository: file://./charts/console + version: '>=0.5 <1.0' + - condition: connectors.enabled + name: connectors + repository: file://./charts/connectors + version: '>=0.1.2 <1.0' + description: Redpanda is the real-time engine for modern apps. 
+ digest: 4ee0ffa03120543a8c5d95bc10cb72520e935848e4b082fe1ca7728455154220 + icon: https://images.ctfassets.net/paqvtpyf8rwu/3cYHw5UzhXCbKuR24GDFGO/73fb682e6157d11c10d5b2b5da1d5af0/skate-stand-panda.svg + kubeVersion: '>=1.21-0' + maintainers: + - name: redpanda-data + url: https://github.com/orgs/redpanda-data/people + name: redpanda + sources: + - https://github.com/redpanda-data/helm-charts + type: application + urls: + - assets/redpanda/redpanda-5.7.37.tgz + version: 5.7.37 - annotations: artifacthub.io/images: | - name: redpanda @@ -69335,6 +70293,37 @@ entries: - assets/bitnami/spark-6.3.8.tgz version: 6.3.8 speedscale-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Speedscale Operator + catalog.cattle.io/kube-version: '>= 1.17.0-0' + catalog.cattle.io/release-name: speedscale-operator + apiVersion: v1 + appVersion: 2.1.186 + created: "2024-04-03T10:30:31.197065484-06:00" + description: Stress test your APIs with real world scenarios. Collect and replay + traffic without scripting. + digest: b405f3c2cc7800ddbacfdb77a563765907df5c875bbfd70dd107382dff47b347 + home: https://speedscale.com + icon: https://raw.githubusercontent.com/speedscale/assets/main/logo/gold_logo_only.png + keywords: + - speedscale + - test + - testing + - regression + - reliability + - load + - replay + - network + - traffic + kubeVersion: '>= 1.17.0-0' + maintainers: + - email: support@speedscale.com + name: Speedscale Support + name: speedscale-operator + urls: + - assets/speedscale/speedscale-operator-2.1.15.tgz + version: 2.1.15 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Speedscale Operator @@ -71720,6 +72709,34 @@ entries: - assets/speedscale/speedscale-operator-0.9.12600.tgz version: 0.9.12600 stackstate-k8s-agent: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: StackState Agent + catalog.cattle.io/kube-version: '>=1.19.0-0' + catalog.cattle.io/release-name: stackstate-k8s-agent + apiVersion: v2 + appVersion: 3.0.0 + created: "2024-04-03T10:30:31.220951856-06:00" + dependencies: + - alias: httpHeaderInjectorWebhook + name: http-header-injector + repository: file://./charts/http-header-injector + version: 0.0.8 + description: Helm chart for the StackState Agent. 
+ digest: 3fd9d8ce747245f80df5e5a610987acddcac86e2f06fc9ad3f2aaae07d547dd0 + home: https://github.com/StackVista/stackstate-agent + icon: https://raw.githubusercontent.com/StackVista/helm-charts/master/stable/stackstate-k8s-agent/logo.svg + keywords: + - monitoring + - observability + - stackstate + maintainers: + - email: ops@stackstate.com + name: Stackstate + name: stackstate-k8s-agent + urls: + - assets/stackstate/stackstate-k8s-agent-1.0.78.tgz + version: 1.0.78 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: StackState Agent @@ -73925,6 +74942,51 @@ entries: - assets/intel/tcs-issuer-0.1.0.tgz version: 0.1.0 tomcat: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Tomcat + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: tomcat + category: ApplicationServer + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.20.0-debian-12-r12 + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r17 + - name: tomcat + image: docker.io/bitnami/tomcat:10.1.20-debian-12-r0 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 10.1.20 + created: "2024-04-03T10:30:00.54071677-06:00" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Tomcat is an open-source web server designed to host and run + Java-based web applications. It is a lightweight server with a good performance + for applications running in production environments. + digest: ac8b459e53f2ae49685d0034b5f2f78b441c55e72538d34db4f9daf15ff3b621 + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/tomcat.svg + keywords: + - tomcat + - java + - http + - web + - application server + - jsp + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: tomcat + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/tomcat + urls: + - assets/bitnami/tomcat-11.0.0.tgz + version: 11.0.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Tomcat @@ -77791,6 +78853,60 @@ entries: - assets/hashicorp/vault-0.22.0.tgz version: 0.22.0 wordpress: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: WordPress + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: wordpress + category: CMS + images: | + - name: apache-exporter + image: docker.io/bitnami/apache-exporter:1.0.7-debian-12-r1 + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r18 + - name: wordpress + image: docker.io/bitnami/wordpress:6.5.0-debian-12-r0 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 6.5.0 + created: "2024-04-03T10:30:01.219262352-06:00" + dependencies: + - condition: memcached.enabled + name: memcached + repository: file://./charts/memcached + version: 7.x.x + - condition: mariadb.enabled + name: mariadb + repository: file://./charts/mariadb + version: 18.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: WordPress is the world's most popular blogging and content management + platform. Powerful yet simple, everyone from students to global corporations + use it to build beautiful, functional websites. 
+ digest: bc04f634c0fce4251478e27e28d31fb7fa572b774769be77e6ce393fd0626747 + home: https://bitnami.com + icon: https://s.w.org/style/images/about/WordPress-logotype-simplified.png + keywords: + - application + - blog + - cms + - http + - php + - web + - wordpress + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: wordpress + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/wordpress + urls: + - assets/bitnami/wordpress-22.1.0.tgz + version: 22.1.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: WordPress @@ -82976,6 +84092,32 @@ entries: - assets/bitnami/wordpress-15.2.6.tgz version: 15.2.6 yugabyte: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: YugabyteDB + catalog.cattle.io/kube-version: '>=1.18-0' + catalog.cattle.io/release-name: yugabyte + charts.openshift.io/name: yugabyte + apiVersion: v2 + appVersion: 2.18.7.0-b30 + created: "2024-04-03T10:30:31.755298696-06:00" + description: YugabyteDB is the high-performance distributed SQL database for building + global, internet-scale apps. + digest: 1eceda153155010d7eccc6d84eded8653dc612aaecfc9a5b45f2ac8b42249783 + home: https://www.yugabyte.com + icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4 + kubeVersion: '>=1.18-0' + maintainers: + - email: sanketh@yugabyte.com + name: Sanketh Indarapu + - email: gjalla@yugabyte.com + name: Govardhan Reddy Jalla + name: yugabyte + sources: + - https://github.com/yugabyte/yugabyte-db + urls: + - assets/yugabyte/yugabyte-2.18.7.tgz + version: 2.18.7 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: YugabyteDB @@ -83725,6 +84867,32 @@ entries: - assets/yugabyte/yugabyte-2.14.3.tgz version: 2.14.3 yugaware: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: YugabyteDB Anywhere + catalog.cattle.io/kube-version: '>=1.18-0' + catalog.cattle.io/release-name: yugaware + charts.openshift.io/name: yugaware + apiVersion: v2 + appVersion: 2.18.7.0-b30 + created: "2024-04-03T10:30:31.789926173-06:00" + description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring + for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB + cluster with multiple pods provided by Kubernetes or OpenShift and logically + grouped together to form one logical distributed database. 
+ digest: e2f3ea8f100662d16ae15072f69d4e4b3ab03df60f7542d31ab9d7c18ce85f87 + home: https://www.yugabyte.com + icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4 + kubeVersion: '>=1.18-0' + maintainers: + - email: sanketh@yugabyte.com + name: Sanketh Indarapu + - email: gjalla@yugabyte.com + name: Govardhan Reddy Jalla + name: yugaware + urls: + - assets/yugabyte/yugaware-2.18.7.tgz + version: 2.18.7 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: YugabyteDB Anywhere @@ -84444,6 +85612,43 @@ entries: - assets/netfoundry/ziti-host-1.5.1.tgz version: 1.5.1 zookeeper: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Zookeeper + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: zookeeper + category: Infrastructure + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r16 + - name: zookeeper + image: docker.io/bitnami/zookeeper:3.9.2-debian-12-r0 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.9.2 + created: "2024-04-03T10:30:01.30158844-06:00" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. + digest: f264edcccfb45c681eac631165746cfb995de9e131f685c5f422f2d43f12e025 + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/zookeeper.svg + keywords: + - zookeeper + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: zookeeper + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/zookeeper + urls: + - assets/bitnami/zookeeper-13.1.0.tgz + version: 13.1.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Zookeeper