diff --git a/assets/amd/amd-gpu-0.10.0.tgz b/assets/amd/amd-gpu-0.10.0.tgz new file mode 100644 index 000000000..9c6e07eb7 Binary files /dev/null and b/assets/amd/amd-gpu-0.10.0.tgz differ diff --git a/assets/argo/argo-cd-5.49.0.tgz b/assets/argo/argo-cd-5.49.0.tgz index 4986e6cd0..96911919e 100644 Binary files a/assets/argo/argo-cd-5.49.0.tgz and b/assets/argo/argo-cd-5.49.0.tgz differ diff --git a/assets/argo/argo-cd-5.51.0.tgz b/assets/argo/argo-cd-5.51.0.tgz new file mode 100644 index 000000000..1b450c95d Binary files /dev/null and b/assets/argo/argo-cd-5.51.0.tgz differ diff --git a/assets/bitnami/airflow-16.1.0.tgz b/assets/bitnami/airflow-16.1.0.tgz new file mode 100644 index 000000000..bd4fdb4a0 Binary files /dev/null and b/assets/bitnami/airflow-16.1.0.tgz differ diff --git a/assets/bitnami/cassandra-10.6.0.tgz b/assets/bitnami/cassandra-10.6.0.tgz new file mode 100644 index 000000000..a8445e03e Binary files /dev/null and b/assets/bitnami/cassandra-10.6.0.tgz differ diff --git a/assets/bitnami/kafka-26.2.1.tgz b/assets/bitnami/kafka-26.2.1.tgz new file mode 100644 index 000000000..446266a95 Binary files /dev/null and b/assets/bitnami/kafka-26.2.1.tgz differ diff --git a/assets/bitnami/mysql-9.14.2.tgz b/assets/bitnami/mysql-9.14.2.tgz new file mode 100644 index 000000000..e4987e054 Binary files /dev/null and b/assets/bitnami/mysql-9.14.2.tgz differ diff --git a/assets/bitnami/postgresql-13.2.1.tgz b/assets/bitnami/postgresql-13.2.1.tgz new file mode 100644 index 000000000..d3865f2fa Binary files /dev/null and b/assets/bitnami/postgresql-13.2.1.tgz differ diff --git a/assets/bitnami/redis-18.2.1.tgz b/assets/bitnami/redis-18.2.1.tgz new file mode 100644 index 000000000..dd7ea2734 Binary files /dev/null and b/assets/bitnami/redis-18.2.1.tgz differ diff --git a/assets/bitnami/spark-8.1.0.tgz b/assets/bitnami/spark-8.1.0.tgz new file mode 100644 index 000000000..ba75a0383 Binary files /dev/null and b/assets/bitnami/spark-8.1.0.tgz differ diff --git a/assets/bitnami/tomcat-10.11.0.tgz b/assets/bitnami/tomcat-10.11.0.tgz new file mode 100644 index 000000000..b8c8e813e Binary files /dev/null and b/assets/bitnami/tomcat-10.11.0.tgz differ diff --git a/assets/bitnami/wordpress-18.1.3.tgz b/assets/bitnami/wordpress-18.1.3.tgz new file mode 100644 index 000000000..be6769c25 Binary files /dev/null and b/assets/bitnami/wordpress-18.1.3.tgz differ diff --git a/assets/bitnami/zookeeper-12.3.0.tgz b/assets/bitnami/zookeeper-12.3.0.tgz new file mode 100644 index 000000000..23ffd83f6 Binary files /dev/null and b/assets/bitnami/zookeeper-12.3.0.tgz differ diff --git a/assets/crowdstrike/falcon-sensor-1.23.1.tgz b/assets/crowdstrike/falcon-sensor-1.23.1.tgz new file mode 100644 index 000000000..71d9dd10b Binary files /dev/null and b/assets/crowdstrike/falcon-sensor-1.23.1.tgz differ diff --git a/assets/datadog/datadog-3.43.1.tgz b/assets/datadog/datadog-3.43.1.tgz new file mode 100644 index 000000000..7e47a9fc2 Binary files /dev/null and b/assets/datadog/datadog-3.43.1.tgz differ diff --git a/assets/f5/nginx-ingress-1.0.2.tgz b/assets/f5/nginx-ingress-1.0.2.tgz new file mode 100644 index 000000000..dc55a5f0b Binary files /dev/null and b/assets/f5/nginx-ingress-1.0.2.tgz differ diff --git a/assets/haproxy/haproxy-1.34.0.tgz b/assets/haproxy/haproxy-1.34.0.tgz new file mode 100644 index 000000000..d95e29502 Binary files /dev/null and b/assets/haproxy/haproxy-1.34.0.tgz differ diff --git a/assets/harbor/harbor-1.13.1.tgz b/assets/harbor/harbor-1.13.1.tgz new file mode 100644 index 000000000..37a1caf17 
Binary files /dev/null and b/assets/harbor/harbor-1.13.1.tgz differ diff --git a/assets/hashicorp/consul-1.2.3.tgz b/assets/hashicorp/consul-1.2.3.tgz new file mode 100644 index 000000000..5564f339c Binary files /dev/null and b/assets/hashicorp/consul-1.2.3.tgz differ diff --git a/assets/kasten/k10-6.0.1201.tgz b/assets/kasten/k10-6.0.1201.tgz new file mode 100644 index 000000000..25de1b96e Binary files /dev/null and b/assets/kasten/k10-6.0.1201.tgz differ diff --git a/assets/kong/kong-2.31.0.tgz b/assets/kong/kong-2.31.0.tgz new file mode 100644 index 000000000..e5a7255fb Binary files /dev/null and b/assets/kong/kong-2.31.0.tgz differ diff --git a/assets/kubecost/cost-analyzer-1.106.4.tgz b/assets/kubecost/cost-analyzer-1.106.4.tgz index 32e4cdefd..f6c722d0c 100644 Binary files a/assets/kubecost/cost-analyzer-1.106.4.tgz and b/assets/kubecost/cost-analyzer-1.106.4.tgz differ diff --git a/assets/kubecost/cost-analyzer-1.107.0.tgz b/assets/kubecost/cost-analyzer-1.107.0.tgz new file mode 100644 index 000000000..928796d3f Binary files /dev/null and b/assets/kubecost/cost-analyzer-1.107.0.tgz differ diff --git a/assets/nats/nats-1.1.4.tgz b/assets/nats/nats-1.1.4.tgz new file mode 100644 index 000000000..907418406 Binary files /dev/null and b/assets/nats/nats-1.1.4.tgz differ diff --git a/assets/redpanda/redpanda-5.6.38.tgz b/assets/redpanda/redpanda-5.6.38.tgz new file mode 100644 index 000000000..012260b34 Binary files /dev/null and b/assets/redpanda/redpanda-5.6.38.tgz differ diff --git a/assets/speedscale/speedscale-operator-1.4.0.tgz b/assets/speedscale/speedscale-operator-1.4.0.tgz new file mode 100644 index 000000000..b34a4d9a4 Binary files /dev/null and b/assets/speedscale/speedscale-operator-1.4.0.tgz differ diff --git a/assets/sysdig/sysdig-1.16.20.tgz b/assets/sysdig/sysdig-1.16.20.tgz new file mode 100644 index 000000000..fe8027c43 Binary files /dev/null and b/assets/sysdig/sysdig-1.16.20.tgz differ diff --git a/assets/weka/csi-wekafsplugin-2.3.1.tgz b/assets/weka/csi-wekafsplugin-2.3.1.tgz new file mode 100644 index 000000000..c185833ae Binary files /dev/null and b/assets/weka/csi-wekafsplugin-2.3.1.tgz differ diff --git a/charts/amd/amd-gpu/Chart.lock b/charts/amd/amd-gpu/Chart.lock index 4748a726b..4ce1f8c18 100644 --- a/charts/amd/amd-gpu/Chart.lock +++ b/charts/amd/amd-gpu/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: node-feature-discovery repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts - version: 0.13.3 -digest: sha256:a4f46d22c9ecd5b82cc2ed17da0c34b0e4936f6365bb61b474ec2780e9af3636 -generated: "2023-08-23T02:41:44.856348249Z" + version: 0.14.3 +digest: sha256:a1651e3e727f3f60f286930ab341af1009cce742b181d19b9ec75d392c5c339b +generated: "2023-11-03T05:15:42.351779792Z" diff --git a/charts/amd/amd-gpu/Chart.yaml b/charts/amd/amd-gpu/Chart.yaml index 9fd90ce84..8068549a6 100644 --- a/charts/amd/amd-gpu/Chart.yaml +++ b/charts/amd/amd-gpu/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.18.0-0' catalog.cattle.io/release-name: amd-gpu apiVersion: v2 -appVersion: 1.25.2.4 +appVersion: 1.25.2.5 dependencies: - condition: nfd.enabled name: node-feature-discovery @@ -25,4 +25,4 @@ name: amd-gpu sources: - https://github.com/RadeonOpenCompute/k8s-device-plugin type: application -version: 0.9.0 +version: 0.10.0 diff --git a/charts/amd/amd-gpu/README.md b/charts/amd/amd-gpu/README.md index b0bb0d261..5cd7b445c 100644 --- a/charts/amd/amd-gpu/README.md +++ b/charts/amd/amd-gpu/README.md @@ -1,6 +1,6 @@ # AMD GPU Helm Chart 
-![Version: 0.9.0](https://img.shields.io/badge/Version-0.9.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.25.2.4](https://img.shields.io/badge/AppVersion-1.25.2.4-informational?style=flat-square) +![Version: 0.10.0](https://img.shields.io/badge/Version-0.10.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.25.2.5](https://img.shields.io/badge/AppVersion-1.25.2.5-informational?style=flat-square) A Helm chart for deploying Kubernetes AMD GPU device plugin diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/Chart.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/Chart.yaml index fafcabf8e..ba7ee404a 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/Chart.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v0.13.3 +appVersion: v0.14.3 description: 'Detects hardware features available on each node in a Kubernetes cluster, and advertises those features using node labels. ' home: https://github.com/kubernetes-sigs/node-feature-discovery @@ -11,4 +11,4 @@ name: node-feature-discovery sources: - https://github.com/kubernetes-sigs/node-feature-discovery type: application -version: 0.13.3 +version: 0.14.3 diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/README.md b/charts/amd/amd-gpu/charts/node-feature-discovery/README.md index 628ac6a36..16b5254d5 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/README.md +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/README.md @@ -6,5 +6,5 @@ labels. NFD provides flexible configuration and extension points for a wide range of vendor and application specific node labeling needs. See -[NFD documentation](https://kubernetes-sigs.github.io/node-feature-discovery/v0.13/deployment/helm.html) +[NFD documentation](https://kubernetes-sigs.github.io/node-feature-discovery/v0.14/deployment/helm.html) for deployment instructions. 
diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/crds/nfd-api-crds.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/crds/nfd-api-crds.yaml index 775536f28..6866c7ffe 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/crds/nfd-api-crds.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/crds/nfd-api-crds.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 name: nodefeatures.nfd.k8s-sigs.io spec: group: nfd.k8s-sigs.io @@ -114,8 +113,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.12.1 name: nodefeaturerules.nfd.k8s-sigs.io spec: group: nfd.k8s-sigs.io diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/_helpers.tpl b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/_helpers.tpl index 5a0a5c97f..928ece78f 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/_helpers.tpl +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/_helpers.tpl @@ -96,12 +96,12 @@ Create the name of the service account which topologyUpdater will use {{- end -}} {{/* -Create the name of the service account which topologyGC will use +Create the name of the service account which nfd-gc will use */}} -{{- define "node-feature-discovery.topologyGC.serviceAccountName" -}} -{{- if .Values.topologyGC.serviceAccount.create -}} - {{ default (printf "%s-topology-gc" (include "node-feature-discovery.fullname" .)) .Values.topologyGC.serviceAccount.name }} +{{- define "node-feature-discovery.gc.serviceAccountName" -}} +{{- if .Values.gc.serviceAccount.create -}} + {{ default (printf "%s-gc" (include "node-feature-discovery.fullname" .)) .Values.gc.serviceAccount.name }} {{- else -}} - {{ default "default" .Values.topologyGC.serviceAccount.name }} + {{ default "default" .Values.gc.serviceAccount.name }} {{- end -}} {{- end -}} diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrole.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrole.yaml index 84b32644f..d4329338b 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrole.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrole.yaml @@ -25,10 +25,25 @@ rules: - get - list - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - "nfd-master.nfd.kubernetes.io" + verbs: + - get + - update {{- end }} ---- {{- if and .Values.topologyUpdater.enable .Values.topologyUpdater.rbac.create }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -65,12 +80,12 @@ rules: - update {{- end }} +{{- if and .Values.gc.enable .Values.gc.rbac.create (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) }} --- -{{- if and .Values.topologyGC.enable .Values.topologyGC.rbac.create .Values.topologyUpdater.enable }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "node-feature-discovery.fullname" . }}-topology-gc + name: {{ include "node-feature-discovery.fullname" . }}-gc labels: {{- include "node-feature-discovery.labels" . 
| nindent 4 }} rules: @@ -94,4 +109,11 @@ rules: verbs: - delete - list +- apiGroups: + - nfd.k8s-sigs.io + resources: + - nodefeatures + verbs: + - delete + - list {{- end }} diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrolebinding.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrolebinding.yaml index b0a69012f..87b3003e2 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrolebinding.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/clusterrolebinding.yaml @@ -15,8 +15,8 @@ subjects: namespace: {{ include "node-feature-discovery.namespace" . }} {{- end }} ---- {{- if and .Values.topologyUpdater.enable .Values.topologyUpdater.rbac.create }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -33,20 +33,20 @@ subjects: namespace: {{ include "node-feature-discovery.namespace" . }} {{- end }} +{{- if and .Values.gc.enable .Values.gc.rbac.create (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) }} --- -{{- if and .Values.topologyGC.enable .Values.topologyGC.rbac.create .Values.topologyUpdater.enable }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ include "node-feature-discovery.fullname" . }}-topology-gc + name: {{ include "node-feature-discovery.fullname" . }}-gc labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: {{ include "node-feature-discovery.fullname" . }}-topology-gc + name: {{ include "node-feature-discovery.fullname" . }}-gc subjects: - kind: ServiceAccount - name: {{ .Values.topologyGC.serviceAccount.name | default "nfd-topology-gc" }} + name: {{ include "node-feature-discovery.gc.serviceAccountName" . }} namespace: {{ include "node-feature-discovery.namespace" . }} {{- end }} diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/master.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/master.yaml index 418ac089d..e77ca136c 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/master.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/master.yaml @@ -6,8 +6,10 @@ metadata: labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} role: master + {{- with .Values.master.deploymentAnnotations }} annotations: - {{- toYaml .Values.master.deploymentAnnotations | nindent 4 }} + {{- toYaml . | nindent 4 }} + {{- end }} spec: replicas: {{ .Values.master.replicaCount }} selector: @@ -19,8 +21,10 @@ spec: labels: {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }} role: master + {{- with .Values.master.annotations }} annotations: - {{- toYaml .Values.master.annotations | nindent 8 }} + {{- toYaml . 
| nindent 8 }} + {{- end }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: @@ -66,6 +70,8 @@ spec: ports: - containerPort: {{ .Values.master.port | default "8080" }} name: grpc + - containerPort: {{ .Values.master.metricsPort | default "8081" }} + name: metrics env: - name: NODE_NAME valueFrom: @@ -80,8 +86,10 @@ spec: - "-instance={{ .Values.master.instance }}" {{- end }} - "-port={{ .Values.master.port | default "8080" }}" - {{- if .Values.enableNodeFeatureApi }} - - "-enable-nodefeature-api" + {{- if not .Values.enableNodeFeatureApi }} + - "-enable-nodefeature-api=false" + {{- else if gt (int .Values.master.replicaCount) 1 }} + - "-enable-leader-election" {{- end }} {{- if .Values.master.extraLabelNs | empty | not }} - "-extra-label-ns={{- join "," .Values.master.extraLabelNs }}" @@ -99,16 +107,23 @@ spec: - "-crd-controller={{ .Values.master.crdController }}" {{- else }} ## By default, disable crd controller for other than the default instances - - "-featurerules-controller={{ .Values.master.instance | empty }}" + - "-crd-controller={{ .Values.master.instance | empty }}" {{- end }} {{- if .Values.master.featureRulesController | kindIs "invalid" | not }} - "-featurerules-controller={{ .Values.master.featureRulesController }}" {{- end }} + {{- if .Values.master.resyncPeriod }} + - "-resync-period={{ .Values.master.resyncPeriod }}" + {{- end }} + {{- if .Values.master.nfdApiParallelism | empty | not }} + - "-nfd-api-parallelism={{ .Values.master.nfdApiParallelism }}" + {{- end }} {{- if .Values.tls.enable }} - "-ca-file=/etc/kubernetes/node-feature-discovery/certs/ca.crt" - "-key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key" - "-cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt" {{- end }} + - "-metrics={{ .Values.master.metricsPort | default "8081" }}" volumeMounts: {{- if .Values.tls.enable }} - name: nfd-master-cert @@ -130,7 +145,6 @@ spec: items: - key: nfd-master.conf path: nfd-master.conf - {{- with .Values.master.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/topology-gc.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/nfd-gc.yaml similarity index 53% rename from charts/amd/amd-gpu/charts/node-feature-discovery/templates/topology-gc.yaml rename to charts/amd/amd-gpu/charts/node-feature-discovery/templates/nfd-gc.yaml index 642fec455..d803eef40 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/topology-gc.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/nfd-gc.yaml @@ -1,36 +1,42 @@ -{{- if and .Values.topologyGC.enable .Values.topologyUpdater.enable -}} +{{- if and .Values.gc.enable (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) -}} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "node-feature-discovery.fullname" . }}-topology-gc + name: {{ include "node-feature-discovery.fullname" . }}-gc namespace: {{ include "node-feature-discovery.namespace" . }} labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} - role: topology-gc + role: gc + {{- with .Values.gc.deploymentAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} spec: - replicas: {{ .Values.topologyGC.replicaCount | default 1 }} + replicas: {{ .Values.gc.replicaCount | default 1 }} selector: matchLabels: {{- include "node-feature-discovery.selectorLabels" . 
| nindent 6 }} - role: topology-gc + role: gc template: metadata: labels: {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }} - role: topology-gc + role: gc + {{- with .Values.gc.annotations }} annotations: - {{- toYaml .Values.topologyGC.annotations | nindent 8 }} + {{- toYaml . | nindent 8 }} + {{- end }} spec: - serviceAccountName: {{ .Values.topologyGC.serviceAccountName | default "nfd-topology-gc" }} + serviceAccountName: {{ include "node-feature-discovery.gc.serviceAccountName" . }} dnsPolicy: ClusterFirstWithHostNet {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} securityContext: - {{- toYaml .Values.topologyGC.podSecurityContext | nindent 8 }} + {{- toYaml .Values.gc.podSecurityContext | nindent 8 }} containers: - - name: topology-gc + - name: gc image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: "{{ .Values.image.pullPolicy }}" env: @@ -39,25 +45,29 @@ spec: fieldRef: fieldPath: spec.nodeName command: - - "nfd-topology-gc" + - "nfd-gc" args: - {{- if .Values.topologyGC.interval | empty | not }} - - "-gc-interval={{ .Values.topologyGC.interval }}" + {{- if .Values.gc.interval | empty | not }} + - "-gc-interval={{ .Values.gc.interval }}" {{- end }} resources: - {{- toYaml .Values.topologyGC.resources | nindent 12 }} + {{- toYaml .Values.gc.resources | nindent 12 }} securityContext: - {{- toYaml .Values.topologyGC.securityContext | nindent 12 }} + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] + readOnlyRootFilesystem: true + runAsNonRoot: true - {{- with .Values.topologyGC.nodeSelector }} + {{- with .Values.gc.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.topologyGC.affinity }} + {{- with .Values.gc.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.topologyGC.tolerations }} + {{- with .Values.gc.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/prometheus.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/prometheus.yaml new file mode 100644 index 000000000..b9f4b4640 --- /dev/null +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/prometheus.yaml @@ -0,0 +1,26 @@ +{{- if .Values.prometheus.enable }} +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "node-feature-discovery.fullname" . }} + labels: + {{- include "node-feature-discovery.selectorLabels" . | nindent 4 }} + {{- with .Values.prometheus.labels }} + {{ toYaml . | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - honorLabels: true + interval: 10s + path: /metrics + port: metrics + scheme: http + namespaceSelector: + matchNames: + - {{ include "node-feature-discovery.namespace" . }} + selector: + matchExpressions: + - {key: app.kubernetes.io/instance, operator: In, values: ["{{ .Release.Name }}"]} + - {key: app.kubernetes.io/name, operator: In, values: ["{{ include "node-feature-discovery.name" . 
}}"]} +{{- end }} diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/role.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/role.yaml index f63cb8ff4..c71ede442 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/role.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/role.yaml @@ -3,6 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ include "node-feature-discovery.fullname" . }}-worker + namespace: {{ include "node-feature-discovery.namespace" . }} labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} rules: diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/rolebinding.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/rolebinding.yaml index 30a00381f..d8025be9b 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/rolebinding.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/rolebinding.yaml @@ -3,6 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ include "node-feature-discovery.fullname" . }}-worker + namespace: {{ include "node-feature-discovery.namespace" . }} labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} roleRef: diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/serviceaccount.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/serviceaccount.yaml index 03211e7c4..34dc8b753 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/serviceaccount.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/serviceaccount.yaml @@ -12,8 +12,8 @@ metadata: {{- end }} {{- end }} ---- {{- if and .Values.topologyUpdater.enable .Values.topologyUpdater.serviceAccount.create }} +--- apiVersion: v1 kind: ServiceAccount metadata: @@ -27,23 +27,23 @@ metadata: {{- end }} {{- end }} +{{- if and .Values.gc.enable .Values.gc.serviceAccount.create (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) }} --- -{{- if and .Values.topologyGC.enable .Values.topologyGC.serviceAccount.create .Values.topologyUpdater.enable }} apiVersion: v1 kind: ServiceAccount metadata: - name: {{ .Values.topologyGC.serviceAccount.name | default "nfd-topology-gc" }} + name: {{ include "node-feature-discovery.gc.serviceAccountName" . }} namespace: {{ include "node-feature-discovery.namespace" . }} labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} - {{- with .Values.topologyUpdater.serviceAccount.annotations }} + {{- with .Values.gc.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} ---- {{- if .Values.worker.serviceAccount.create }} +--- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/topologyupdater.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/topologyupdater.yaml index cd3fca051..f51c10e6d 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/topologyupdater.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/topologyupdater.yaml @@ -7,6 +7,10 @@ metadata: labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} role: topology-updater + {{- with .Values.topologyUpdater.daemonsetAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} spec: selector: matchLabels: @@ -17,8 +21,10 @@ spec: labels: {{- include "node-feature-discovery.selectorLabels" . 
| nindent 8 }} role: topology-updater + {{- with .Values.topologyUpdater.annotations }} annotations: - {{- toYaml .Values.topologyUpdater.annotations | nindent 8 }} + {{- toYaml . | nindent 8 }} + {{- end }} spec: serviceAccountName: {{ include "node-feature-discovery.topologyUpdater.serviceAccountName" . }} dnsPolicy: ClusterFirstWithHostNet @@ -37,6 +43,10 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: NODE_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP command: - "nfd-topology-updater" args: @@ -66,6 +76,10 @@ spec: # Disable kubelet state tracking by giving an empty path - "-kubelet-state-dir=" {{- end }} + - -metrics={{ .Values.topologyUpdater.metricsPort | default "8081"}} + ports: + - name: metrics + containerPort: {{ .Values.topologyUpdater.metricsPort | default "8081"}} volumeMounts: {{- if .Values.topologyUpdater.kubeletConfigPath | empty | not }} - name: kubelet-config diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/worker.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/worker.yaml index c1240bdc9..0e56eb5d1 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/templates/worker.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/templates/worker.yaml @@ -6,8 +6,10 @@ metadata: labels: {{- include "node-feature-discovery.labels" . | nindent 4 }} role: worker + {{- with .Values.worker.daemonsetAnnotations }} annotations: - {{- toYaml .Values.worker.daemonsetAnnotations | nindent 4 }} + {{- toYaml . | nindent 4 }} + {{- end }} spec: selector: matchLabels: @@ -18,8 +20,10 @@ spec: labels: {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }} role: worker + {{- with .Values.worker.annotations }} annotations: - {{- toYaml .Values.worker.annotations | nindent 8 }} + {{- toYaml . | nindent 8 }} + {{- end }} spec: dnsPolicy: ClusterFirstWithHostNet {{- with .Values.imagePullSecrets }} @@ -46,14 +50,18 @@ spec: - "nfd-worker" args: - "-server={{ include "node-feature-discovery.fullname" . 
}}-master:{{ .Values.master.service.port }}" - {{- if .Values.enableNodeFeatureApi }} - - "-enable-nodefeature-api" + {{- if not .Values.enableNodeFeatureApi }} + - "-enable-nodefeature-api=false" {{- end }} {{- if .Values.tls.enable }} - "-ca-file=/etc/kubernetes/node-feature-discovery/certs/ca.crt" - "-key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key" - "-cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt" {{- end }} + - "-metrics={{ .Values.worker.metricsPort | default "8081"}}" + ports: + - name: metrics + containerPort: {{ .Values.worker.metricsPort | default "8081"}} volumeMounts: - name: host-boot mountPath: "/host-boot" diff --git a/charts/amd/amd-gpu/charts/node-feature-discovery/values.yaml b/charts/amd/amd-gpu/charts/node-feature-discovery/values.yaml index d3db4355d..2291aef4f 100644 --- a/charts/amd/amd-gpu/charts/node-feature-discovery/values.yaml +++ b/charts/amd/amd-gpu/charts/node-feature-discovery/values.yaml @@ -10,7 +10,7 @@ nameOverride: "" fullnameOverride: "" namespaceOverride: "" -enableNodeFeatureApi: false +enableNodeFeatureApi: true master: config: ### @@ -20,17 +20,43 @@ master: # resourceLabels: ["vendor-1.com/feature-1","vendor-2.io/feature-2"] # enableTaints: false # labelWhiteList: "foo" + # resyncPeriod: "2h" + # klog: + # addDirHeader: false + # alsologtostderr: false + # logBacktraceAt: + # logtostderr: true + # skipHeaders: false + # stderrthreshold: 2 + # v: 0 + # vmodule: + ## NOTE: the following options are not dynamically run-time configurable + ## and require a nfd-master restart to take effect after being changed + # logDir: + # logFile: + # logFileMaxSize: 1800 + # skipLogHeaders: false + # leaderElection: + # leaseDuration: 15s + # # this value has to be lower than leaseDuration and greater than retryPeriod*1.2 + # renewDeadline: 10s + # # this value has to be greater than 0 + # retryPeriod: 2s + # nfdApiParallelism: 10 ### # The TCP port that nfd-master listens for incoming requests. 
Default: 8080 port: 8080 + metricsPort: 8081 instance: featureApi: + resyncPeriod: denyLabelNs: [] extraLabelNs: [] resourceLabels: [] enableTaints: false crdController: null featureRulesController: null + nfdApiParallelism: null deploymentAnnotations: {} replicaCount: 1 @@ -154,6 +180,7 @@ worker: # - "SSE4" # - "SSE42" # - "SSSE3" + # - "TDX_GUEST" # attributeWhitelist: # kernel: # kconfigFile: "/path/to/kconfig" @@ -183,7 +210,7 @@ worker: # - "vendor" # - "device" # local: - # hooksEnabled: true + # hooksEnabled: false # custom: # # The following feature demonstrates the capabilities of the matchFeatures # - name: "my custom rule" @@ -332,6 +359,7 @@ worker: # ### + metricsPort: 8081 daemonsetAnnotations: {} podSecurityContext: {} # fsGroup: 2000 @@ -404,6 +432,7 @@ topologyUpdater: rbac: create: true + metricsPort: 8081 kubeletConfigPath: kubeletPodResourcesSockPath: updateInterval: 60s @@ -433,10 +462,11 @@ topologyUpdater: nodeSelector: {} tolerations: [] annotations: {} + daemonsetAnnotations: {} affinity: {} podSetFingerprint: true -topologyGC: +gc: enable: true replicaCount: 1 @@ -450,12 +480,6 @@ topologyGC: interval: 1h podSecurityContext: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: [ "ALL" ] - readOnlyRootFilesystem: true - runAsNonRoot: true resources: {} # We usually recommend not to specify default resources and to leave this as a conscious @@ -472,6 +496,7 @@ topologyGC: nodeSelector: {} tolerations: [] annotations: {} + deploymentAnnotations: {} affinity: {} # Optionally use encryption for worker <--> master comms @@ -482,3 +507,7 @@ topologyGC: tls: enable: false certManager: false + +prometheus: + enable: false + labels: {} diff --git a/charts/amd/amd-gpu/values.yaml b/charts/amd/amd-gpu/values.yaml index 242fffb18..79da1ffc9 100644 --- a/charts/amd/amd-gpu/values.yaml +++ b/charts/amd/amd-gpu/values.yaml @@ -10,13 +10,13 @@ dp: image: repository: docker.io/rocm/k8s-device-plugin # Overrides the image tag whose default is the chart appVersion. 
- tag: "1.25.2.4" + tag: "1.25.2.5" resources: {} lbl: image: repository: docker.io/rocm/k8s-device-plugin - tag: "labeller-1.25.2.4" + tag: "labeller-1.25.2.5" resources: {} imagePullSecrets: [] diff --git a/charts/argo/argo-cd/Chart.yaml b/charts/argo/argo-cd/Chart.yaml index 95144722c..22993096b 100644 --- a/charts/argo/argo-cd/Chart.yaml +++ b/charts/argo/argo-cd/Chart.yaml @@ -1,7 +1,7 @@ annotations: artifacthub.io/changes: | - - kind: added - description: Add notification cluster role support + - kind: changed + description: Upgrade Argo CD to v2.9.0 artifacthub.io/signKey: | fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 url: https://argoproj.github.io/argo-helm/pgp_keys.asc @@ -11,7 +11,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.23.0-0' catalog.cattle.io/release-name: argo-cd apiVersion: v2 -appVersion: v2.8.5 +appVersion: v2.9.0 dependencies: - condition: redis-ha.enabled name: redis-ha @@ -33,4 +33,4 @@ name: argo-cd sources: - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd - https://github.com/argoproj/argo-cd -version: 5.49.0 +version: 5.51.0 diff --git a/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml b/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml index ed161d538..b704650ae 100644 --- a/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml +++ b/charts/argo/argo-cd/templates/argocd-applicationset/deployment.yaml @@ -90,6 +90,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS + valueFrom: + configMapKeyRef: + key: applicationsetcontroller.global.preserved.annotations + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS + valueFrom: + configMapKeyRef: + key: applicationsetcontroller.global.preserved.labels + name: argocd-cmd-params-cm + optional: true - name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_LEADER_ELECTION valueFrom: configMapKeyRef: diff --git a/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml b/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml index 1d2e0fd7f..927d30a05 100644 --- a/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml +++ b/charts/argo/argo-cd/templates/argocd-notifications/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if .Values.createClusterRoles }} +{{- if and .Values.notifications.enabled .Values.createClusterRoles }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/charts/argo/argo-cd/templates/argocd-notifications/clusterrolebinding.yaml b/charts/argo/argo-cd/templates/argocd-notifications/clusterrolebinding.yaml index 1b47bf123..3dba71a2f 100644 --- a/charts/argo/argo-cd/templates/argocd-notifications/clusterrolebinding.yaml +++ b/charts/argo/argo-cd/templates/argocd-notifications/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if .Values.createClusterRoles }} +{{- if and .Values.notifications.enabled .Values.createClusterRoles }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml b/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml index 515984d7a..90941428e 100644 --- a/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml +++ b/charts/argo/argo-cd/templates/argocd-repo-server/deployment.yaml @@ -231,6 +231,18 @@ spec: key: reposerver.streamed.manifest.max.extracted.size name: argocd-cmd-params-cm optional: true 
+ - name: ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_EXTRACTED_SIZE + valueFrom: + configMapKeyRef: + key: reposerver.helm.manifest.max.extracted.size + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE + valueFrom: + configMapKeyRef: + name: argocd-cmd-params-cm + key: reposerver.disable.helm.manifest.max.extracted.size + optional: true - name: ARGOCD_GIT_MODULES_ENABLED valueFrom: configMapKeyRef: diff --git a/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml b/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml index 838016fa0..44d50515c 100644 --- a/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml +++ b/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml @@ -40,6 +40,7 @@ rules: - argoproj.io resources: - applications + - applicationsets verbs: - get - list diff --git a/charts/argo/argo-cd/templates/crds/crd-application.yaml b/charts/argo/argo-cd/templates/crds/crd-application.yaml index b4d13a15e..034015741 100644 --- a/charts/argo/argo-cd/templates/crds/crd-application.yaml +++ b/charts/argo/argo-cd/templates/crds/crd-application.yaml @@ -359,6 +359,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -657,6 +688,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -797,7 +859,8 @@ spec: properties: name: description: Name is an alternate way of specifying the target - cluster by its symbolic name + cluster by its symbolic name. This must be set if Server is + not set. type: string namespace: description: Namespace specifies the target namespace for the @@ -805,8 +868,9 @@ spec: namespace-scoped resources that have not set a value for .metadata.namespace type: string server: - description: Server specifies the URL of the target cluster and - must be set to the Kubernetes control plane API + description: Server specifies the URL of the target cluster's + Kubernetes control plane API. This must be set if Name is not + set. 
type: string type: object ignoreDifferences: @@ -1067,6 +1131,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -1355,6 +1450,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -1796,6 +1922,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -2097,6 +2254,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -2542,6 +2730,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -2860,6 +3079,38 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches 
is a list of Kustomize + patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -3292,6 +3543,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -3603,6 +3885,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -3804,7 +4117,8 @@ spec: properties: name: description: Name is an alternate way of specifying the - target cluster by its symbolic name + target cluster by its symbolic name. This must be set + if Server is not set. type: string namespace: description: Namespace specifies the target namespace @@ -3813,8 +4127,9 @@ spec: not set a value for .metadata.namespace type: string server: - description: Server specifies the URL of the target cluster - and must be set to the Kubernetes control plane API + description: Server specifies the URL of the target cluster's + Kubernetes control plane API. This must be set if Name + is not set. 
type: string type: object ignoreDifferences: @@ -4056,6 +4371,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications @@ -4367,6 +4713,37 @@ spec: description: Namespace sets the namespace that Kustomize adds to all resources type: string + patches: + description: Patches is a list of Kustomize patches + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: description: Replicas is a list of Kustomize Replicas override specifications diff --git a/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml b/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml index fa0d926a7..8d7409e57 100644 --- a/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml +++ b/charts/argo/argo-cd/templates/crds/crd-applicationset.yaml @@ -269,6 +269,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -449,6 +479,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -788,6 +848,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -968,6 +1058,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: 
string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -1311,6 +1431,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -1491,6 +1641,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -1814,6 +1994,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -1994,6 +2204,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -2341,6 +2581,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -2521,6 +2791,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -2860,6 +3160,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: 
string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -3040,6 +3370,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -3383,6 +3743,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -3563,6 +3953,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -3886,6 +4306,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -4066,6 +4516,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -4399,6 +4879,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -4579,6 +5089,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string 
+ type: object + type: object + type: array replicas: items: properties: @@ -5092,6 +5632,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -5272,6 +5842,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -5564,6 +6164,8 @@ spec: type: string group: type: string + includeSharedProjects: + type: boolean includeSubgroups: type: boolean insecure: @@ -5578,6 +6180,8 @@ spec: - key - secretName type: object + topic: + type: string required: - group type: object @@ -5776,6 +6380,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -5956,6 +6590,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -6293,6 +6957,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -6473,6 +7167,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -6820,6 +7544,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + 
type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -7000,6 +7754,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -7339,6 +8123,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -7519,6 +8333,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -7862,6 +8706,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -8042,6 +8916,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -8365,6 +9269,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -8545,6 +9479,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + 
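The repeated hunks above add a `patches` list (with `patch`, `path`, `options`, and a `target` selector) under the Kustomize source options of the ApplicationSet CRD. A minimal sketch of how an ApplicationSet template might use it; the repository URL, application name, and target Deployment name are hypothetical placeholders, not taken from this chart:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: example-appset                  # hypothetical name
spec:
  generators:
    - list:
        elements:
          - env: prod
  template:
    metadata:
      name: 'example-{{env}}'
    spec:
      project: default
      destination:
        server: https://kubernetes.default.svc
        namespace: 'example-{{env}}'
      source:
        repoURL: https://example.com/org/repo.git   # placeholder repository
        targetRevision: main
        path: overlays/{{env}}
        kustomize:
          patches:                       # new field from the schema above
            - target:                    # selects the objects to patch
                kind: Deployment
                name: example-deployment # hypothetical workload name
              patch: |-                  # inline patch body
                - op: replace
                  path: /spec/replicas
                  value: 3
```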
properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -8878,6 +9842,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -9058,6 +10052,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -9571,6 +10595,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -9751,6 +10805,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -10043,6 +11127,8 @@ spec: type: string group: type: string + includeSharedProjects: + type: boolean includeSubgroups: type: boolean insecure: @@ -10057,6 +11143,8 @@ spec: - key - secretName type: object + topic: + type: string required: - group type: object @@ -10255,6 +11343,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -10435,6 +11553,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: 
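Elsewhere in the same CRD, the hunks add `includeSharedProjects` and `topic` to the GitLab SCM-provider generator schema, alongside the existing `group`, `includeSubgroups`, and `tokenRef` fields. A sketch of how those fields might be set; the group ID, topic, and Secret name are hypothetical:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: gitlab-discovery                 # hypothetical name
spec:
  generators:
    - scmProvider:
        gitlab:
          group: "1234567"               # required group ID (placeholder)
          includeSubgroups: true
          includeSharedProjects: false   # new boolean field
          topic: kubernetes              # new string filter (placeholder topic)
          tokenRef:
            secretName: gitlab-token     # hypothetical Secret
            key: token
  template:
    metadata:
      name: '{{repository}}'
    spec:
      project: default
      source:
        repoURL: '{{url}}'
        targetRevision: '{{branch}}'
        path: .
      destination:
        server: https://kubernetes.default.svc
        namespace: '{{repository}}'
```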
properties: @@ -10776,6 +11924,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -10956,6 +12134,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -11286,6 +12494,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -11466,6 +12704,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -11979,6 +13247,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -12159,6 +13457,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -12451,6 +13779,8 @@ spec: type: string group: type: string + includeSharedProjects: + type: boolean includeSubgroups: type: boolean insecure: @@ -12465,6 +13795,8 @@ spec: - key - secretName type: object + topic: + type: string required: - group type: object @@ -12663,6 +13995,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + 
annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -12843,6 +14205,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -12994,12 +14386,31 @@ spec: items: type: string type: array + ignoreApplicationDifferences: + items: + properties: + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + name: + type: string + type: object + type: array preservedFields: properties: annotations: items: type: string type: array + labels: + items: + type: string + type: array type: object strategy: properties: @@ -13236,6 +14647,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: @@ -13416,6 +14857,36 @@ spec: type: string namespace: type: string + patches: + items: + properties: + options: + additionalProperties: + type: boolean + type: object + patch: + type: string + path: + type: string + target: + properties: + annotationSelector: + type: string + group: + type: string + kind: + type: string + labelSelector: + type: string + name: + type: string + namespace: + type: string + version: + type: string + type: object + type: object + type: array replicas: items: properties: diff --git a/charts/bitnami/airflow/Chart.yaml b/charts/bitnami/airflow/Chart.yaml index 268471bb2..e61e28719 100644 --- a/charts/bitnami/airflow/Chart.yaml +++ b/charts/bitnami/airflow/Chart.yaml @@ -50,4 +50,4 @@ maintainers: name: airflow sources: - https://github.com/bitnami/charts/tree/main/bitnami/airflow -version: 16.0.7 +version: 16.1.0 diff --git a/charts/bitnami/airflow/README.md b/charts/bitnami/airflow/README.md index 4bb92541b..b9a65ccca 100644 --- a/charts/bitnami/airflow/README.md +++ b/charts/bitnami/airflow/README.md @@ -108,209 +108,221 @@ The command removes all the Kubernetes components associated with the chart and ### Airflow web parameters -| Name | Description | Value | -| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------------- | -| `web.image.registry` | Airflow image registry | `REGISTRY_NAME` | -| `web.image.repository` | Airflow image repository | `REPOSITORY_NAME/airflow` | -| `web.image.digest` | Airflow image digest in the way sha256:aa.... 
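The `@@ -12994` hunk above also extends the ApplicationSet spec itself with `ignoreApplicationDifferences` (per-application `jsonPointers`/`jqPathExpressions`) and a `labels` list under `preservedFields`. A sketch of those top-level fields, assuming the usual ApplicationSet spec layout; the pointer, application name, and label key are illustrative only:

```yaml
spec:
  ignoreApplicationDifferences:
    - jsonPointers:
        - /spec/syncPolicy            # example pointer to ignore on generated Applications
    - name: guestbook                 # optionally scoped to one generated Application
      jqPathExpressions:
        - .spec.source.targetRevision
  preservedFields:
    annotations:
      - example.com/owner             # placeholder annotation key
    labels:
      - team                          # new: labels can now be preserved as well
```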
Please note this parameter, if set, will override the tag | `""` | -| `web.image.pullPolicy` | Airflow image pull policy | `IfNotPresent` | -| `web.image.pullSecrets` | Airflow image pull secrets | `[]` | -| `web.image.debug` | Enable image debug mode | `false` | -| `web.baseUrl` | URL used to access to Airflow web ui | `""` | -| `web.existingConfigmap` | Name of an existing config map containing the Airflow web config file | `""` | -| `web.command` | Override default container command (useful when using custom images) | `[]` | -| `web.args` | Override default container args (useful when using custom images) | `[]` | -| `web.extraEnvVars` | Array with extra environment variables to add Airflow web pods | `[]` | -| `web.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow web pods | `""` | -| `web.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow web pods | `""` | -| `web.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow web pods | `[]` | -| `web.containerPorts.http` | Airflow web HTTP container port | `8080` | -| `web.replicaCount` | Number of Airflow web replicas | `1` | -| `web.livenessProbe.enabled` | Enable livenessProbe on Airflow web containers | `true` | -| `web.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | -| `web.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | -| `web.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `web.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `web.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `web.readinessProbe.enabled` | Enable readinessProbe on Airflow web containers | `true` | -| `web.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | -| `web.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `web.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `web.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `web.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `web.startupProbe.enabled` | Enable startupProbe on Airflow web containers | `false` | -| `web.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | -| `web.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `web.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `web.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `web.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `web.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `web.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `web.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `web.resources.limits` | The resources limits for the Airflow web containers | `{}` | -| `web.resources.requests` | The requested resources for the Airflow web containers | `{}` | -| `web.podSecurityContext.enabled` | Enabled Airflow web pods' Security Context | `true` | -| `web.podSecurityContext.fsGroup` | Set Airflow web pod's Security Context fsGroup | `1001` | -| `web.containerSecurityContext.enabled` | Enabled Airflow web containers' Security Context | `true` | -| 
`web.containerSecurityContext.runAsUser` | Set Airflow web containers' Security Context runAsUser | `1001` | -| `web.containerSecurityContext.runAsNonRoot` | Set Airflow web containers' Security Context runAsNonRoot | `true` | -| `web.lifecycleHooks` | for the Airflow web container(s) to automate configuration before or after startup | `{}` | -| `web.hostAliases` | Deployment pod host aliases | `[]` | -| `web.podLabels` | Add extra labels to the Airflow web pods | `{}` | -| `web.podAnnotations` | Add extra annotations to the Airflow web pods | `{}` | -| `web.affinity` | Affinity for Airflow web pods assignment (evaluated as a template) | `{}` | -| `web.nodeAffinityPreset.key` | Node label key to match. Ignored if `web.affinity` is set. | `""` | -| `web.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `web.nodeAffinityPreset.values` | Node label values to match. Ignored if `web.affinity` is set. | `[]` | -| `web.nodeSelector` | Node labels for Airflow web pods assignment | `{}` | -| `web.podAffinityPreset` | Pod affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. | `""` | -| `web.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | -| `web.tolerations` | Tolerations for Airflow web pods assignment | `[]` | -| `web.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `web.priorityClassName` | Priority Class Name | `""` | -| `web.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | -| `web.terminationGracePeriodSeconds` | Seconds Airflow web pod needs to terminate gracefully | `""` | -| `web.updateStrategy.type` | Airflow web deployment strategy type | `RollingUpdate` | -| `web.updateStrategy.rollingUpdate` | Airflow web deployment rolling update configuration parameters | `{}` | -| `web.sidecars` | Add additional sidecar containers to the Airflow web pods | `[]` | -| `web.initContainers` | Add additional init containers to the Airflow web pods | `[]` | -| `web.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow web pods | `[]` | -| `web.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow web pods | `[]` | -| `web.pdb.create` | Deploy a pdb object for the Airflow web pods | `false` | -| `web.pdb.minAvailable` | Maximum number/percentage of unavailable Airflow web replicas | `1` | -| `web.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow web replicas | `""` | +| Name | Description | Value | +| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------------- | +| `web.image.registry` | Airflow image registry | `REGISTRY_NAME` | +| `web.image.repository` | Airflow image repository | `REPOSITORY_NAME/airflow` | +| `web.image.digest` | Airflow image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `web.image.pullPolicy` | Airflow image pull policy | `IfNotPresent` | +| `web.image.pullSecrets` | Airflow image pull secrets | `[]` | +| `web.image.debug` | Enable image debug mode | `false` | +| `web.baseUrl` | URL used to access to Airflow web ui | `""` | +| `web.existingConfigmap` | Name of an existing config map containing the Airflow web config file | `""` | +| `web.command` | Override default container command (useful when using custom images) | `[]` | +| `web.args` | Override default container args (useful when using custom images) | `[]` | +| `web.extraEnvVars` | Array with extra environment variables to add Airflow web pods | `[]` | +| `web.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow web pods | `""` | +| `web.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow web pods | `""` | +| `web.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow web pods | `[]` | +| `web.containerPorts.http` | Airflow web HTTP container port | `8080` | +| `web.replicaCount` | Number of Airflow web replicas | `1` | +| `web.livenessProbe.enabled` | Enable livenessProbe on Airflow web containers | `true` | +| `web.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `web.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `web.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `web.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `web.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `web.readinessProbe.enabled` | Enable readinessProbe on Airflow web containers | `true` | +| `web.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `web.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `web.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `web.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `web.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `web.startupProbe.enabled` | Enable startupProbe on Airflow web containers | `false` | +| `web.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `web.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `web.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `web.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `web.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `web.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `web.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `web.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `web.resources.limits` | The resources limits for the Airflow web containers | `{}` | +| `web.resources.requests` | The requested resources for the Airflow web containers | `{}` | +| `web.podSecurityContext.enabled` | Enabled Airflow web pods' Security Context | `true` | +| `web.podSecurityContext.fsGroup` | Set Airflow web pod's Security Context fsGroup | `1001` | +| `web.containerSecurityContext.enabled` | Enabled Airflow web containers' Security Context | `true` | +| 
`web.containerSecurityContext.runAsUser` | Set Airflow web containers' Security Context runAsUser | `1001` | +| `web.containerSecurityContext.runAsNonRoot` | Set Airflow web containers' Security Context runAsNonRoot | `true` | +| `web.containerSecurityContext.privileged` | Set web container's Security Context privileged | `false` | +| `web.containerSecurityContext.allowPrivilegeEscalation` | Set web container's Security Context allowPrivilegeEscalation | `false` | +| `web.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `web.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `web.lifecycleHooks` | for the Airflow web container(s) to automate configuration before or after startup | `{}` | +| `web.hostAliases` | Deployment pod host aliases | `[]` | +| `web.podLabels` | Add extra labels to the Airflow web pods | `{}` | +| `web.podAnnotations` | Add extra annotations to the Airflow web pods | `{}` | +| `web.affinity` | Affinity for Airflow web pods assignment (evaluated as a template) | `{}` | +| `web.nodeAffinityPreset.key` | Node label key to match. Ignored if `web.affinity` is set. | `""` | +| `web.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `web.nodeAffinityPreset.values` | Node label values to match. Ignored if `web.affinity` is set. | `[]` | +| `web.nodeSelector` | Node labels for Airflow web pods assignment | `{}` | +| `web.podAffinityPreset` | Pod affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `web.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `web.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `web.tolerations` | Tolerations for Airflow web pods assignment | `[]` | +| `web.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `web.priorityClassName` | Priority Class Name | `""` | +| `web.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `web.terminationGracePeriodSeconds` | Seconds Airflow web pod needs to terminate gracefully | `""` | +| `web.updateStrategy.type` | Airflow web deployment strategy type | `RollingUpdate` | +| `web.updateStrategy.rollingUpdate` | Airflow web deployment rolling update configuration parameters | `{}` | +| `web.sidecars` | Add additional sidecar containers to the Airflow web pods | `[]` | +| `web.initContainers` | Add additional init containers to the Airflow web pods | `[]` | +| `web.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow web pods | `[]` | +| `web.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow web pods | `[]` | +| `web.pdb.create` | Deploy a pdb object for the Airflow web pods | `false` | +| `web.pdb.minAvailable` | Maximum number/percentage of unavailable Airflow web replicas | `1` | +| `web.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow web replicas | `""` | ### Airflow scheduler parameters -| Name | Description | Value | -| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ----------------------------------- | -| `scheduler.image.registry` | Airflow Scheduler image registry | `REGISTRY_NAME` | -| `scheduler.image.repository` | Airflow Scheduler image repository | `REPOSITORY_NAME/airflow-scheduler` | -| `scheduler.image.digest` | Airflow Schefuler image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `scheduler.image.pullPolicy` | Airflow Scheduler image pull policy | `IfNotPresent` | -| `scheduler.image.pullSecrets` | Airflow Scheduler image pull secrets | `[]` | -| `scheduler.image.debug` | Enable image debug mode | `false` | -| `scheduler.replicaCount` | Number of scheduler replicas | `1` | -| `scheduler.command` | Override cmd | `[]` | -| `scheduler.args` | Override args | `[]` | -| `scheduler.extraEnvVars` | Add extra environment variables | `[]` | -| `scheduler.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | -| `scheduler.extraEnvVarsSecret` | Secret with extra environment variables | `""` | -| `scheduler.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow scheduler pods | `[]` | -| `scheduler.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `scheduler.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `scheduler.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `scheduler.resources.limits` | The resources limits for the Airflow scheduler containers | `{}` | -| `scheduler.resources.requests` | The requested resources for the Airflow scheduler containers | `{}` | -| `scheduler.podSecurityContext.enabled` | Enabled Airflow scheduler pods' Security Context | `true` | -| `scheduler.podSecurityContext.fsGroup` | Set Airflow scheduler pod's Security Context fsGroup | `1001` | -| `scheduler.containerSecurityContext.enabled` | Enabled Airflow scheduler containers' Security Context | `true` | -| `scheduler.containerSecurityContext.runAsUser` | Set Airflow scheduler containers' Security Context runAsUser | `1001` | -| `scheduler.containerSecurityContext.runAsNonRoot` | Set Airflow scheduler containers' Security Context runAsNonRoot | `true` | -| `scheduler.lifecycleHooks` | for the Airflow scheduler container(s) to automate 
configuration before or after startup | `{}` | -| `scheduler.hostAliases` | Deployment pod host aliases | `[]` | -| `scheduler.podLabels` | Add extra labels to the Airflow scheduler pods | `{}` | -| `scheduler.podAnnotations` | Add extra annotations to the Airflow scheduler pods | `{}` | -| `scheduler.affinity` | Affinity for Airflow scheduler pods assignment (evaluated as a template) | `{}` | -| `scheduler.nodeAffinityPreset.key` | Node label key to match. Ignored if `scheduler.affinity` is set. | `""` | -| `scheduler.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `scheduler.nodeAffinityPreset.values` | Node label values to match. Ignored if `scheduler.affinity` is set. | `[]` | -| `scheduler.nodeSelector` | Node labels for Airflow scheduler pods assignment | `{}` | -| `scheduler.podAffinityPreset` | Pod affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. | `""` | -| `scheduler.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | -| `scheduler.tolerations` | Tolerations for Airflow scheduler pods assignment | `[]` | -| `scheduler.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `scheduler.priorityClassName` | Priority Class Name | `""` | -| `scheduler.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | -| `scheduler.terminationGracePeriodSeconds` | Seconds Airflow scheduler pod needs to terminate gracefully | `""` | -| `scheduler.updateStrategy.type` | Airflow scheduler deployment strategy type | `RollingUpdate` | -| `scheduler.updateStrategy.rollingUpdate` | Airflow scheduler deployment rolling update configuration parameters | `{}` | -| `scheduler.sidecars` | Add additional sidecar containers to the Airflow scheduler pods | `[]` | -| `scheduler.initContainers` | Add additional init containers to the Airflow scheduler pods | `[]` | -| `scheduler.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow scheduler pods | `[]` | -| `scheduler.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow scheduler pods | `[]` | -| `scheduler.pdb.create` | Deploy a pdb object for the Airflow scheduler pods | `false` | -| `scheduler.pdb.minAvailable` | Maximum number/percentage of unavailable Airflow scheduler replicas | `1` | -| `scheduler.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow scheduler replicas | `""` | +| Name | Description | Value | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ----------------------------------- | +| `scheduler.image.registry` | Airflow Scheduler image registry | `REGISTRY_NAME` | +| `scheduler.image.repository` | Airflow Scheduler image repository | `REPOSITORY_NAME/airflow-scheduler` | +| `scheduler.image.digest` | Airflow Schefuler image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `scheduler.image.pullPolicy` | Airflow Scheduler image pull policy | `IfNotPresent` | +| `scheduler.image.pullSecrets` | Airflow Scheduler image pull secrets | `[]` | +| `scheduler.image.debug` | Enable image debug mode | `false` | +| `scheduler.replicaCount` | Number of scheduler replicas | `1` | +| `scheduler.command` | Override cmd | `[]` | +| `scheduler.args` | Override args | `[]` | +| `scheduler.extraEnvVars` | Add extra environment variables | `[]` | +| `scheduler.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `scheduler.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `scheduler.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow scheduler pods | `[]` | +| `scheduler.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `scheduler.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `scheduler.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `scheduler.resources.limits` | The resources limits for the Airflow scheduler containers | `{}` | +| `scheduler.resources.requests` | The requested resources for the Airflow scheduler containers | `{}` | +| `scheduler.podSecurityContext.enabled` | Enabled Airflow scheduler pods' Security Context | `true` | +| `scheduler.podSecurityContext.fsGroup` | Set Airflow scheduler pod's Security Context fsGroup | `1001` | +| `scheduler.containerSecurityContext.enabled` | Enabled Airflow scheduler containers' Security Context | `true` | +| `scheduler.containerSecurityContext.runAsUser` | Set Airflow scheduler containers' Security Context runAsUser | `1001` | +| `scheduler.containerSecurityContext.runAsNonRoot` | Set Airflow scheduler containers' Security Context runAsNonRoot | `true` | +| `scheduler.containerSecurityContext.privileged` | Set scheduler container's Security Context privileged | `false` | +| `scheduler.containerSecurityContext.allowPrivilegeEscalation` | Set scheduler container's Security Context allowPrivilegeEscalation | `false` | +| `scheduler.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `scheduler.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `scheduler.lifecycleHooks` | for the Airflow scheduler container(s) to automate configuration before or after startup | `{}` | +| `scheduler.hostAliases` | Deployment pod host aliases | `[]` | +| `scheduler.podLabels` | Add extra labels to the Airflow scheduler pods | `{}` | +| `scheduler.podAnnotations` | Add extra annotations to the Airflow scheduler pods | `{}` | +| `scheduler.affinity` | Affinity for Airflow scheduler pods assignment (evaluated as a template) | `{}` | +| `scheduler.nodeAffinityPreset.key` | Node label key to match. Ignored if `scheduler.affinity` is set. | `""` | +| `scheduler.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `scheduler.nodeAffinityPreset.values` | Node label values to match. Ignored if `scheduler.affinity` is set. | `[]` | +| `scheduler.nodeSelector` | Node labels for Airflow scheduler pods assignment | `{}` | +| `scheduler.podAffinityPreset` | Pod affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. 
| `""` | +| `scheduler.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `scheduler.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `scheduler.tolerations` | Tolerations for Airflow scheduler pods assignment | `[]` | +| `scheduler.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `scheduler.priorityClassName` | Priority Class Name | `""` | +| `scheduler.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `scheduler.terminationGracePeriodSeconds` | Seconds Airflow scheduler pod needs to terminate gracefully | `""` | +| `scheduler.updateStrategy.type` | Airflow scheduler deployment strategy type | `RollingUpdate` | +| `scheduler.updateStrategy.rollingUpdate` | Airflow scheduler deployment rolling update configuration parameters | `{}` | +| `scheduler.sidecars` | Add additional sidecar containers to the Airflow scheduler pods | `[]` | +| `scheduler.initContainers` | Add additional init containers to the Airflow scheduler pods | `[]` | +| `scheduler.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow scheduler pods | `[]` | +| `scheduler.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow scheduler pods | `[]` | +| `scheduler.pdb.create` | Deploy a pdb object for the Airflow scheduler pods | `false` | +| `scheduler.pdb.minAvailable` | Maximum number/percentage of unavailable Airflow scheduler replicas | `1` | +| `scheduler.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow scheduler replicas | `""` | ### Airflow worker parameters -| Name | Description | Value | -| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | -| `worker.image.registry` | Airflow Worker image registry | `REGISTRY_NAME` | -| `worker.image.repository` | Airflow Worker image repository | `REPOSITORY_NAME/airflow-worker` | -| `worker.image.digest` | Airflow Worker image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `worker.image.pullPolicy` | Airflow Worker image pull policy | `IfNotPresent` | -| `worker.image.pullSecrets` | Airflow Worker image pull secrets | `[]` | -| `worker.image.debug` | Enable image debug mode | `false` | -| `worker.command` | Override default container command (useful when using custom images) | `[]` | -| `worker.args` | Override default container args (useful when using custom images) | `[]` | -| `worker.extraEnvVars` | Array with extra environment variables to add Airflow worker pods | `[]` | -| `worker.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow worker pods | `""` | -| `worker.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow worker pods | `""` | -| `worker.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow worker pods | `[]` | -| `worker.containerPorts.http` | Airflow worker HTTP container port | `8793` | -| `worker.replicaCount` | Number of Airflow worker replicas | `1` | -| `worker.livenessProbe.enabled` | Enable livenessProbe on Airflow worker containers | `true` | -| `worker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | -| `worker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | -| `worker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `worker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `worker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `worker.readinessProbe.enabled` | Enable readinessProbe on Airflow worker containers | `true` | -| `worker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | -| `worker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `worker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `worker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `worker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `worker.startupProbe.enabled` | Enable startupProbe on Airflow worker containers | `false` | -| `worker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | -| `worker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `worker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `worker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `worker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `worker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `worker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `worker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `worker.resources.limits` | The resources limits for the Airflow worker containers | `{}` | -| `worker.resources.requests` | The requested resources for the Airflow worker containers | `{}` | -| `worker.podSecurityContext.enabled` | Enabled Airflow worker pods' Security Context | `true` | -| `worker.podSecurityContext.fsGroup` | Set Airflow worker pod's Security Context fsGroup | `1001` | -| `worker.containerSecurityContext.enabled` | Enabled Airflow worker containers' Security Context | `true` | -| 
`worker.containerSecurityContext.runAsUser` | Set Airflow worker containers' Security Context runAsUser | `1001` | -| `worker.containerSecurityContext.runAsNonRoot` | Set Airflow worker containers' Security Context runAsNonRoot | `true` | -| `worker.lifecycleHooks` | for the Airflow worker container(s) to automate configuration before or after startup | `{}` | -| `worker.hostAliases` | Deployment pod host aliases | `[]` | -| `worker.podLabels` | Add extra labels to the Airflow worker pods | `{}` | -| `worker.podAnnotations` | Add extra annotations to the Airflow worker pods | `{}` | -| `worker.affinity` | Affinity for Airflow worker pods assignment (evaluated as a template) | `{}` | -| `worker.nodeAffinityPreset.key` | Node label key to match. Ignored if `worker.affinity` is set. | `""` | -| `worker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `worker.nodeAffinityPreset.values` | Node label values to match. Ignored if `worker.affinity` is set. | `[]` | -| `worker.nodeSelector` | Node labels for Airflow worker pods assignment | `{}` | -| `worker.podAffinityPreset` | Pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. | `""` | -| `worker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | -| `worker.tolerations` | Tolerations for Airflow worker pods assignment | `[]` | -| `worker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `worker.priorityClassName` | Priority Class Name | `""` | -| `worker.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | -| `worker.terminationGracePeriodSeconds` | Seconds Airflow worker pod needs to terminate gracefully | `""` | -| `worker.updateStrategy.type` | Airflow worker deployment strategy type | `RollingUpdate` | -| `worker.updateStrategy.rollingUpdate` | Airflow worker deployment rolling update configuration parameters | `{}` | -| `worker.sidecars` | Add additional sidecar containers to the Airflow worker pods | `[]` | -| `worker.initContainers` | Add additional init containers to the Airflow worker pods | `[]` | -| `worker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow worker pods | `[]` | -| `worker.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow worker pods | `[]` | -| `worker.extraVolumeClaimTemplates` | Optionally specify extra list of volumesClaimTemplates for the Airflow worker statefulset | `[]` | -| `worker.podTemplate` | Template to replace the default one to be use when `executor=KubernetesExecutor` to create Airflow worker pods | `{}` | -| `worker.pdb.create` | Deploy a pdb object for the Airflow worker pods | `false` | -| `worker.pdb.minAvailable` | Maximum number/percentage of unavailable Airflow worker replicas | `1` | -| `worker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow worker replicas | `""` | -| `worker.autoscaling.enabled` | Whether enable horizontal pod autoscaler | `false` | -| `worker.autoscaling.minReplicas` | Configure a minimum amount of pods | `1` | -| `worker.autoscaling.maxReplicas` | Configure a maximum amount of pods | `3` | -| `worker.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `80` | -| `worker.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `80` | +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | +| `worker.image.registry` | Airflow Worker image registry | `REGISTRY_NAME` | +| `worker.image.repository` | Airflow Worker image repository | `REPOSITORY_NAME/airflow-worker` | +| `worker.image.digest` | Airflow Worker image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `worker.image.pullPolicy` | Airflow Worker image pull policy | `IfNotPresent` | +| `worker.image.pullSecrets` | Airflow Worker image pull secrets | `[]` | +| `worker.image.debug` | Enable image debug mode | `false` | +| `worker.command` | Override default container command (useful when using custom images) | `[]` | +| `worker.args` | Override default container args (useful when using custom images) | `[]` | +| `worker.extraEnvVars` | Array with extra environment variables to add Airflow worker pods | `[]` | +| `worker.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow worker pods | `""` | +| `worker.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow worker pods | `""` | +| `worker.extraEnvVarsSecrets` | List of secrets with extra environment variables for Airflow worker pods | `[]` | +| `worker.containerPorts.http` | Airflow worker HTTP container port | `8793` | +| `worker.replicaCount` | Number of Airflow worker replicas | `1` | +| `worker.livenessProbe.enabled` | Enable livenessProbe on Airflow worker containers | `true` | +| `worker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `worker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `worker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `worker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `worker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `worker.readinessProbe.enabled` | Enable readinessProbe on Airflow worker containers | `true` | +| `worker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `worker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `worker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `worker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `worker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `worker.startupProbe.enabled` | Enable startupProbe on Airflow worker containers | `false` | +| `worker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `60` | +| `worker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `worker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `worker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `worker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `worker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `worker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `worker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `worker.resources.limits` | The resources limits for the Airflow worker containers | `{}` | +| `worker.resources.requests` | The requested resources for the Airflow worker containers | `{}` | +| `worker.podSecurityContext.enabled` | Enabled Airflow worker pods' Security Context | `true` | +| `worker.podSecurityContext.fsGroup` | Set Airflow worker pod's Security Context fsGroup | `1001` | +| `worker.containerSecurityContext.enabled` | Enabled Airflow worker containers' Security Context | `true` | +| 
`worker.containerSecurityContext.runAsUser` | Set Airflow worker containers' Security Context runAsUser | `1001` | +| `worker.containerSecurityContext.runAsNonRoot` | Set Airflow worker containers' Security Context runAsNonRoot | `true` | +| `worker.containerSecurityContext.privileged` | Set worker container's Security Context privileged | `false` | +| `worker.containerSecurityContext.allowPrivilegeEscalation` | Set worker container's Security Context allowPrivilegeEscalation | `false` | +| `worker.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `worker.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `worker.lifecycleHooks` | for the Airflow worker container(s) to automate configuration before or after startup | `{}` | +| `worker.hostAliases` | Deployment pod host aliases | `[]` | +| `worker.podLabels` | Add extra labels to the Airflow worker pods | `{}` | +| `worker.podAnnotations` | Add extra annotations to the Airflow worker pods | `{}` | +| `worker.affinity` | Affinity for Airflow worker pods assignment (evaluated as a template) | `{}` | +| `worker.nodeAffinityPreset.key` | Node label key to match. Ignored if `worker.affinity` is set. | `""` | +| `worker.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `worker.nodeAffinityPreset.values` | Node label values to match. Ignored if `worker.affinity` is set. | `[]` | +| `worker.nodeSelector` | Node labels for Airflow worker pods assignment | `{}` | +| `worker.podAffinityPreset` | Pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `worker.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `worker.tolerations` | Tolerations for Airflow worker pods assignment | `[]` | +| `worker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `worker.priorityClassName` | Priority Class Name | `""` | +| `worker.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `worker.terminationGracePeriodSeconds` | Seconds Airflow worker pod needs to terminate gracefully | `""` | +| `worker.updateStrategy.type` | Airflow worker deployment strategy type | `RollingUpdate` | +| `worker.updateStrategy.rollingUpdate` | Airflow worker deployment rolling update configuration parameters | `{}` | +| `worker.sidecars` | Add additional sidecar containers to the Airflow worker pods | `[]` | +| `worker.initContainers` | Add additional init containers to the Airflow worker pods | `[]` | +| `worker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Airflow worker pods | `[]` | +| `worker.extraVolumes` | Optionally specify extra list of additional volumes for the Airflow worker pods | `[]` | +| `worker.extraVolumeClaimTemplates` | Optionally specify extra list of volumesClaimTemplates for the Airflow worker statefulset | `[]` | +| `worker.podTemplate` | Template to replace the default one to be use when `executor=KubernetesExecutor` to create Airflow worker pods | `{}` | +| `worker.pdb.create` | Deploy a pdb object for the Airflow worker pods | `false` | +| `worker.pdb.minAvailable` | Maximum number/percentage of unavailable Airflow worker replicas | `1` | +| `worker.pdb.maxUnavailable` | Maximum number/percentage of unavailable Airflow worker replicas | `""` | +| `worker.autoscaling.enabled` | Whether enable horizontal pod autoscaler | `false` | +| `worker.autoscaling.minReplicas` | Configure a minimum amount of pods | `1` | +| `worker.autoscaling.maxReplicas` | Configure a maximum amount of pods | `3` | +| `worker.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `80` | +| `worker.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `80` | ### Airflow git sync parameters @@ -404,52 +416,56 @@ The command removes all the Kubernetes components associated with the chart and ### Airflow metrics parameters -| Name | Description | Value | -| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------- | -| `metrics.enabled` | Whether or not to create a standalone Airflow exporter to expose Airflow metrics | `false` | -| `metrics.image.registry` | Airflow exporter image registry | `REGISTRY_NAME` | -| `metrics.image.repository` | Airflow exporter image repository | `REPOSITORY_NAME/airflow-exporter` | -| `metrics.image.digest` | Airflow exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `metrics.image.pullPolicy` | Airflow exporter image pull policy | `IfNotPresent` | -| `metrics.image.pullSecrets` | Airflow exporter image pull secrets | `[]` | -| `metrics.extraEnvVars` | Array with extra environment variables to add Airflow exporter pods | `[]` | -| `metrics.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow exporter pods | `""` | -| `metrics.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow exporter pods | `""` | -| `metrics.containerPorts.http` | Airflow exporter metrics container port | `9112` | -| `metrics.resources.limits` | The resources limits for the container | `{}` | -| `metrics.resources.requests` | The requested resources for the container | `{}` | -| `metrics.podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `metrics.podSecurityContext.fsGroup` | Set Airflow exporter pod's Security Context fsGroup | `1001` | -| `metrics.containerSecurityContext.enabled` | Enable Airflow exporter containers' Security Context | `true` | -| `metrics.containerSecurityContext.runAsUser` | Set Airflow exporter containers' Security Context runAsUser | `1001` | -| `metrics.containerSecurityContext.runAsNonRoot` | Set Airflow exporter containers' Security Context runAsNonRoot | `true` | -| `metrics.lifecycleHooks` | for the Airflow exporter container(s) to automate configuration before or after startup | `{}` | -| `metrics.hostAliases` | Airflow exporter pods host aliases | `[]` | -| `metrics.podLabels` | Extra labels for Airflow exporter pods | `{}` | -| `metrics.podAnnotations` | Extra annotations for Airflow exporter pods | `{}` | -| `metrics.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `metrics.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `metrics.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `metrics.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.affinity` is set. | `""` | -| `metrics.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.affinity` is set. 
| `[]` | -| `metrics.affinity` | Affinity for pod assignment | `{}` | -| `metrics.nodeSelector` | Node labels for pod assignment | `{}` | -| `metrics.tolerations` | Tolerations for pod assignment | `[]` | -| `metrics.schedulerName` | Name of the k8s scheduler (other than default) for Airflow exporter | `""` | -| `metrics.service.ports.http` | Airflow exporter metrics service port | `9112` | -| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `metrics.service.annotations` | Annotations for the Airflow exporter service | `{}` | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | -| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| Name | Description | Value | +| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------- | +| `metrics.enabled` | Whether or not to create a standalone Airflow exporter to expose Airflow metrics | `false` | +| `metrics.image.registry` | Airflow exporter image registry | `REGISTRY_NAME` | +| `metrics.image.repository` | Airflow exporter image repository | `REPOSITORY_NAME/airflow-exporter` | +| `metrics.image.digest` | Airflow exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | Airflow exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Airflow exporter image pull secrets | `[]` | +| `metrics.extraEnvVars` | Array with extra environment variables to add Airflow exporter pods | `[]` | +| `metrics.extraEnvVarsCM` | ConfigMap containing extra environment variables for Airflow exporter pods | `""` | +| `metrics.extraEnvVarsSecret` | Secret containing extra environment variables (in case of sensitive data) for Airflow exporter pods | `""` | +| `metrics.containerPorts.http` | Airflow exporter metrics container port | `9112` | +| `metrics.resources.limits` | The resources limits for the container | `{}` | +| `metrics.resources.requests` | The requested resources for the container | `{}` | +| `metrics.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.podSecurityContext.fsGroup` | Set Airflow exporter pod's Security Context fsGroup | `1001` | +| `metrics.containerSecurityContext.enabled` | Enable Airflow exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set Airflow exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set Airflow exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.containerSecurityContext.privileged` | Set metrics container's Security Context privileged | `false` | +| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set metrics container's Security Context allowPrivilegeEscalation | `false` | +| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.lifecycleHooks` | for the Airflow exporter container(s) to automate configuration before or after startup | `{}` | +| `metrics.hostAliases` | Airflow exporter pods host aliases | `[]` | +| `metrics.podLabels` | Extra labels for Airflow exporter pods | `{}` | +| `metrics.podAnnotations` | Extra annotations for Airflow exporter pods | `{}` | +| `metrics.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.affinity` is set. | `""` | +| `metrics.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.affinity` is set. 
| `[]` | +| `metrics.affinity` | Affinity for pod assignment | `{}` | +| `metrics.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.schedulerName` | Name of the k8s scheduler (other than default) for Airflow exporter | `""` | +| `metrics.service.ports.http` | Airflow exporter metrics service port | `9112` | +| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.service.annotations` | Annotations for the Airflow exporter service | `{}` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | ### Airflow database parameters diff --git a/charts/bitnami/airflow/values.yaml b/charts/bitnami/airflow/values.yaml index 793b7b4f6..0249119fb 100644 --- a/charts/bitnami/airflow/values.yaml +++ b/charts/bitnami/airflow/values.yaml @@ -311,11 +311,21 @@ web: ## @param web.containerSecurityContext.enabled Enabled Airflow web containers' Security Context ## @param web.containerSecurityContext.runAsUser Set Airflow web containers' Security Context runAsUser ## @param web.containerSecurityContext.runAsNonRoot Set Airflow web containers' Security Context runAsNonRoot + ## @param web.containerSecurityContext.privileged Set web container's Security Context privileged + ## @param web.containerSecurityContext.allowPrivilegeEscalation Set web container's Security Context allowPrivilegeEscalation + ## @param web.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param web.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param web.lifecycleHooks for the Airflow web container(s) to automate configuration before or after startup ## lifecycleHooks: {} @@ -515,11 +525,21 @@ scheduler: ## @param scheduler.containerSecurityContext.enabled Enabled Airflow scheduler containers' Security Context ## @param scheduler.containerSecurityContext.runAsUser Set Airflow scheduler containers' Security Context runAsUser ## @param scheduler.containerSecurityContext.runAsNonRoot Set Airflow scheduler containers' Security Context runAsNonRoot + ## @param 
scheduler.containerSecurityContext.privileged Set scheduler container's Security Context privileged + ## @param scheduler.containerSecurityContext.allowPrivilegeEscalation Set scheduler container's Security Context allowPrivilegeEscalation + ## @param scheduler.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param scheduler.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param scheduler.lifecycleHooks for the Airflow scheduler container(s) to automate configuration before or after startup ## lifecycleHooks: {} @@ -767,11 +787,21 @@ worker: ## @param worker.containerSecurityContext.enabled Enabled Airflow worker containers' Security Context ## @param worker.containerSecurityContext.runAsUser Set Airflow worker containers' Security Context runAsUser ## @param worker.containerSecurityContext.runAsNonRoot Set Airflow worker containers' Security Context runAsNonRoot + ## @param worker.containerSecurityContext.privileged Set worker container's Security Context privileged + ## @param worker.containerSecurityContext.allowPrivilegeEscalation Set worker container's Security Context allowPrivilegeEscalation + ## @param worker.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param worker.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param worker.lifecycleHooks for the Airflow worker container(s) to automate configuration before or after startup ## lifecycleHooks: {} @@ -1331,6 +1361,10 @@ metrics: ## @param metrics.containerSecurityContext.enabled Enable Airflow exporter containers' Security Context ## @param metrics.containerSecurityContext.runAsUser Set Airflow exporter containers' Security Context runAsUser ## @param metrics.containerSecurityContext.runAsNonRoot Set Airflow exporter containers' Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set metrics container's Security Context privileged + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set metrics container's Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## e.g: ## containerSecurityContext: ## enabled: true @@ -1342,6 +1376,12 @@ metrics: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param metrics.lifecycleHooks for the Airflow exporter container(s) to automate configuration before or after startup ## lifecycleHooks: {} diff --git a/charts/bitnami/cassandra/Chart.yaml b/charts/bitnami/cassandra/Chart.yaml index bf0f8710f..3afa5e6ef 100644 --- a/charts/bitnami/cassandra/Chart.yaml +++ b/charts/bitnami/cassandra/Chart.yaml @@ -35,4 +35,4 @@ maintainers: name: cassandra sources: - https://github.com/bitnami/charts/tree/main/bitnami/cassandra -version: 10.5.8 +version: 10.6.0 
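For readers skimming the Airflow hunks above: the web, scheduler, worker and metrics containers all gain the same hardened `containerSecurityContext` defaults in this change. A minimal values override that restates them (a useful starting point if a workload needs to relax one of the new fields) might look like the sketch below; the key paths mirror the `@param` annotations in the diff, while the file name and the YAML anchor are illustrative only.

```yaml
# values-security.yaml (hypothetical override file) -- restates the container
# security context defaults introduced in this diff for bitnami/airflow.
# Edit individual fields only if a workload genuinely needs more privilege.
web:
  containerSecurityContext: &hardened
    enabled: true
    runAsUser: 1001
    runAsNonRoot: true
    privileged: false
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: "RuntimeDefault"
scheduler:
  containerSecurityContext: *hardened   # same settings as the web container
worker:
  containerSecurityContext: *hardened
metrics:
  containerSecurityContext: *hardened
```

Rendering the chart with `helm template -f values-security.yaml` and inspecting the resulting pod specs is a quick way to confirm the merged security context before upgrading.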
diff --git a/charts/bitnami/cassandra/README.md b/charts/bitnami/cassandra/README.md index fa3a7a867..3c064df98 100644 --- a/charts/bitnami/cassandra/README.md +++ b/charts/bitnami/cassandra/README.md @@ -11,16 +11,18 @@ Trademarks: This software listing is packaged by Bitnami. The respective tradema ## TL;DR ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/cassandra +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/cassandra ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + ## Introduction This chart bootstraps an [Apache Cassandra](https://github.com/bitnami/containers/tree/main/bitnami/cassandra) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. -Looking to use Apache Cassandra in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use Apache Cassandra in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. ## Prerequisites @@ -33,9 +35,11 @@ Looking to use Apache Cassandra in production? Try [VMware Application Catalog]( To install the chart with the release name `my-release`: ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/cassandra +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/cassandra ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + These commands deploy one node with Apache Cassandra on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. > **Tip**: List all releases using `helm list` @@ -77,108 +81,112 @@ The command removes all the Kubernetes components associated with the chart and ### Cassandra parameters -| Name | Description | Value | -| ----------------------------- | ---------------------------------------------------------------------------------------------------------------------- | --------------------- | -| `image.registry` | Cassandra image registry | `docker.io` | -| `image.repository` | Cassandra image repository | `bitnami/cassandra` | -| `image.tag` | Cassandra image tag (immutable tags are recommended) | `4.1.3-debian-11-r71` | -| `image.digest` | Cassandra image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Cassandra image pull secrets | `[]` | -| `image.debug` | Enable image debug mode | `false` | -| `dbUser.user` | Cassandra admin user | `cassandra` | -| `dbUser.forcePassword` | Force the user to provide a non | `false` | -| `dbUser.password` | Password for `dbUser.user`. 
Randomly generated if empty | `""` | -| `dbUser.existingSecret` | Use an existing secret object for `dbUser.user` password (will ignore `dbUser.password`) | `""` | -| `initDBConfigMap` | ConfigMap with cql scripts. Useful for creating a keyspace and pre-populating data | `""` | -| `initDBSecret` | Secret with cql script (with sensitive data). Useful for creating a keyspace and pre-populating data | `""` | -| `existingConfiguration` | ConfigMap with custom cassandra configuration files. This overrides any other Cassandra configuration set in the chart | `""` | -| `cluster.name` | Cassandra cluster name | `cassandra` | -| `cluster.seedCount` | Number of seed nodes | `1` | -| `cluster.numTokens` | Number of tokens for each node | `256` | -| `cluster.datacenter` | Datacenter name | `dc1` | -| `cluster.rack` | Rack name | `rack1` | -| `cluster.endpointSnitch` | Endpoint Snitch | `SimpleSnitch` | -| `cluster.internodeEncryption` | DEPRECATED: use tls.internode and tls.client instead. Encryption values. | `none` | -| `cluster.clientEncryption` | Client Encryption | `false` | -| `cluster.extraSeeds` | For an external/second cassandra ring. | `[]` | -| `cluster.enableUDF` | Enable User defined functions | `false` | -| `jvm.extraOpts` | Set the value for Java Virtual Machine extra options | `""` | -| `jvm.maxHeapSize` | Set Java Virtual Machine maximum heap size (MAX_HEAP_SIZE). Calculated automatically if `nil` | `""` | -| `jvm.newHeapSize` | Set Java Virtual Machine new heap size (HEAP_NEWSIZE). Calculated automatically if `nil` | `""` | -| `command` | Command for running the container (set to default if not set). Use array form | `[]` | -| `args` | Args for running the container (set to default if not set). Use array form | `[]` | -| `extraEnvVars` | Extra environment variables to be set on cassandra container | `[]` | -| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | -| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` | +| Name | Description | Value | +| ----------------------------- | ---------------------------------------------------------------------------------------------------------------------- | --------------------------- | +| `image.registry` | Cassandra image registry | `REGISTRY_NAME` | +| `image.repository` | Cassandra image repository | `REPOSITORY_NAME/cassandra` | +| `image.digest` | Cassandra image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Cassandra image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | +| `dbUser.user` | Cassandra admin user | `cassandra` | +| `dbUser.forcePassword` | Force the user to provide a non | `false` | +| `dbUser.password` | Password for `dbUser.user`. Randomly generated if empty | `""` | +| `dbUser.existingSecret` | Use an existing secret object for `dbUser.user` password (will ignore `dbUser.password`) | `""` | +| `initDBConfigMap` | ConfigMap with cql scripts. Useful for creating a keyspace and pre-populating data | `""` | +| `initDBSecret` | Secret with cql script (with sensitive data). Useful for creating a keyspace and pre-populating data | `""` | +| `existingConfiguration` | ConfigMap with custom cassandra configuration files. 
This overrides any other Cassandra configuration set in the chart | `""` | +| `cluster.name` | Cassandra cluster name | `cassandra` | +| `cluster.seedCount` | Number of seed nodes | `1` | +| `cluster.numTokens` | Number of tokens for each node | `256` | +| `cluster.datacenter` | Datacenter name | `dc1` | +| `cluster.rack` | Rack name | `rack1` | +| `cluster.endpointSnitch` | Endpoint Snitch | `SimpleSnitch` | +| `cluster.internodeEncryption` | DEPRECATED: use tls.internode and tls.client instead. Encryption values. | `none` | +| `cluster.clientEncryption` | Client Encryption | `false` | +| `cluster.extraSeeds` | For an external/second cassandra ring. | `[]` | +| `cluster.enableUDF` | Enable User defined functions | `false` | +| `jvm.extraOpts` | Set the value for Java Virtual Machine extra options | `""` | +| `jvm.maxHeapSize` | Set Java Virtual Machine maximum heap size (MAX_HEAP_SIZE). Calculated automatically if `nil` | `""` | +| `jvm.newHeapSize` | Set Java Virtual Machine new heap size (HEAP_NEWSIZE). Calculated automatically if `nil` | `""` | +| `command` | Command for running the container (set to default if not set). Use array form | `[]` | +| `args` | Args for running the container (set to default if not set). Use array form | `[]` | +| `extraEnvVars` | Extra environment variables to be set on cassandra container | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` | ### Statefulset parameters -| Name | Description | Value | -| --------------------------------------- | ----------------------------------------------------------------------------------------- | --------------- | -| `replicaCount` | Number of Cassandra replicas | `1` | -| `updateStrategy.type` | updateStrategy for Cassandra statefulset | `RollingUpdate` | -| `hostAliases` | Add deployment host aliases | `[]` | -| `podManagementPolicy` | StatefulSet pod management policy | `OrderedReady` | -| `priorityClassName` | Cassandra pods' priority. | `""` | -| `podAnnotations` | Additional pod annotations | `{}` | -| `podLabels` | Additional pod labels | `{}` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | -| `nodeAffinityPreset.values` | Node label values to match. 
Ignored if `affinity` is set | `[]` | -| `affinity` | Affinity for pod assignment | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Tolerations for pod assignment | `[]` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | -| `podSecurityContext.enabled` | Enabled Cassandra pods' Security Context | `true` | -| `podSecurityContext.fsGroup` | Set Cassandra pod's Security Context fsGroup | `1001` | -| `containerSecurityContext.enabled` | Enabled Cassandra containers' Security Context | `true` | -| `containerSecurityContext.runAsUser` | Set Cassandra container's Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsNonRoot` | Force the container to be run as non root | `true` | -| `resources.limits` | The resources limits for Cassandra containers | `{}` | -| `resources.requests` | The requested resources for Cassandra containers | `{}` | -| `livenessProbe.enabled` | Enable livenessProbe | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | -| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `30` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `readinessProbe.enabled` | Enable readinessProbe | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `30` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `startupProbe.enabled` | Enable startupProbe | `false` | -| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `0` | -| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | -| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` | -| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `customStartupProbe` | Override default startup probe | `{}` | -| `lifecycleHooks` | Override default etcd container hooks | `{}` | -| `schedulerName` | Alternative scheduler | `""` | -| `terminationGracePeriodSeconds` | In seconds, time the given to the Cassandra pod needs to terminate gracefully | `""` | -| `extraVolumes` | Optionally specify extra list of additional volumes for cassandra container | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for cassandra container | `[]` | -| `initContainers` | Add additional init containers to the cassandra pods | `[]` | -| `sidecars` | Add additional sidecar containers to the cassandra pods | `[]` | -| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | -| `pdb.minAvailable` | Mininimum number of pods that must still be available after the eviction | `1` | -| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `""` | -| `hostNetwork` | Enable HOST Network | 
`false` | -| `containerPorts.intra` | Intra Port on the Host and Container | `7000` | -| `containerPorts.tls` | TLS Port on the Host and Container | `7001` | -| `containerPorts.jmx` | JMX Port on the Host and Container | `7199` | -| `containerPorts.cql` | CQL Port on the Host and Container | `9042` | -| `hostPorts.intra` | Intra Port on the Host | `""` | -| `hostPorts.tls` | TLS Port on the Host | `""` | -| `hostPorts.jmx` | JMX Port on the Host | `""` | -| `hostPorts.cql` | CQL Port on the Host | `""` | +| Name | Description | Value | +| --------------------------------------------------- | ----------------------------------------------------------------------------------------- | ---------------- | +| `replicaCount` | Number of Cassandra replicas | `1` | +| `updateStrategy.type` | updateStrategy for Cassandra statefulset | `RollingUpdate` | +| `hostAliases` | Add deployment host aliases | `[]` | +| `podManagementPolicy` | StatefulSet pod management policy | `OrderedReady` | +| `priorityClassName` | Cassandra pods' priority. | `""` | +| `podAnnotations` | Additional pod annotations | `{}` | +| `podLabels` | Additional pod labels | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `podSecurityContext.enabled` | Enabled Cassandra pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set Cassandra pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled Cassandra containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set Cassandra containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set Cassandra containers' Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | Set Cassandra containers' Security Context capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set Cassandra containers' Security Context readOnlyRootFilesystem | `false` | +| `containerSecurityContext.runAsNonRoot` | Set Cassandra containers' Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `resources.limits` | The resources limits for Cassandra containers | `{}` | +| `resources.requests` | The requested resources for Cassandra containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for 
livenessProbe | `30` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `30` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `0` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `lifecycleHooks` | Override default etcd container hooks | `{}` | +| `schedulerName` | Alternative scheduler | `""` | +| `terminationGracePeriodSeconds` | In seconds, time the given to the Cassandra pod needs to terminate gracefully | `""` | +| `extraVolumes` | Optionally specify extra list of additional volumes for cassandra container | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for cassandra container | `[]` | +| `initContainers` | Add additional init containers to the cassandra pods | `[]` | +| `sidecars` | Add additional sidecar containers to the cassandra pods | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Mininimum number of pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `""` | +| `hostNetwork` | Enable HOST Network | `false` | +| `containerPorts.intra` | Intra Port on the Host and Container | `7000` | +| `containerPorts.tls` | TLS Port on the Host and Container | `7001` | +| `containerPorts.jmx` | JMX Port on the Host and Container | `7199` | +| `containerPorts.cql` | CQL Port on the Host and Container | `9042` | +| `hostPorts.intra` | Intra Port on the Host | `""` | +| `hostPorts.tls` | TLS Port on the Host | `""` | +| `hostPorts.jmx` | JMX Port on the Host | `""` | +| `hostPorts.cql` | CQL Port on the Host | `""` | ### RBAC parameters @@ -227,54 +235,52 @@ The command removes all the Kubernetes components associated with the chart and ### Volume Permissions parameters -| Name | Description | Value | -| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------ | -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | -| `volumePermissions.image.registry` | Init container volume image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume image repository | `bitnami/os-shell` | -| 
`volumePermissions.image.tag` | Init container volume image tag (immutable tags are recommended) | `11-debian-11-r90` | -| `volumePermissions.image.digest` | Init container volume image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `volumePermissions.resources.limits` | The resources limits for the container | `{}` | -| `volumePermissions.resources.requests` | The requested resources for the container | `{}` | -| `volumePermissions.securityContext.runAsUser` | User ID for the init container | `0` | +| Name | Description | Value | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the container | `{}` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container | `0` | ### Metrics parameters -| Name | Description | Value | -| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | ---------------------------- | -| `metrics.enabled` | Start a side-car prometheus exporter | `false` | -| `metrics.image.registry` | Cassandra exporter image registry | `docker.io` | -| `metrics.image.repository` | Cassandra exporter image name | `bitnami/cassandra-exporter` | -| `metrics.image.tag` | Cassandra exporter image tag | `2.3.8-debian-11-r429` | -| `metrics.image.digest` | Cassandra exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `metrics.image.pullPolicy` | image pull policy | `IfNotPresent` | -| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.resources.limits` | The resources limits for the container | `{}` | -| `metrics.resources.requests` | The requested resources for the container | `{}` | -| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | -| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `45` | -| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | -| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for cassandra-exporter container | `[]` | -| `metrics.podAnnotations` | Metrics exporter pod Annotation and Labels | `{}` | -| `metrics.serviceMonitor.enabled` | If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `monitoring` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.metricRelabelings` | Specify Metric Relabelings to add to the scrape endpoint | `[]` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | -| `metrics.serviceMonitor.labels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | -| `metrics.containerPorts.http` | HTTP Port on the Host and Container | `8080` | -| `metrics.containerPorts.jmx` | JMX Port on the Host and Container | `5555` | -| `metrics.hostPorts.http` | HTTP Port on the Host | `""` | -| `metrics.hostPorts.jmx` | JMX Port on the Host | `""` | -| `metrics.configuration` | Configure Cassandra-exporter with a custom config.yml file | `""` | +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------ | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Cassandra exporter image registry | `REGISTRY_NAME` | +| `metrics.image.repository` | Cassandra exporter image name | `REPOSITORY_NAME/cassandra-exporter` | +| `metrics.image.digest` | Cassandra exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.resources.limits` | The resources limits for the container | `{}` | +| `metrics.resources.requests` | The requested resources for the container | `{}` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `45` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for cassandra-exporter container | `[]` | +| `metrics.podAnnotations` | Metrics exporter pod Annotation and Labels | `{}` | +| `metrics.serviceMonitor.enabled` | If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `monitoring` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.metricRelabelings` | Specify Metric Relabelings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.serviceMonitor.labels` | Used to pass Labels that are required by the installed Prometheus Operator | `{}` | +| `metrics.containerPorts.http` | HTTP Port on the Host and Container | `8080` | +| `metrics.containerPorts.jmx` | JMX Port on the Host and Container | `5555` | +| `metrics.hostPorts.http` | HTTP Port on the Host | `""` | +| `metrics.hostPorts.jmx` | JMX Port on the Host | `""` | +| `metrics.configuration` | Configure Cassandra-exporter with a custom config.yml file | `""` | ### TLS/SSL parameters @@ -299,15 +305,18 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm ```console helm install my-release \ --set dbUser.user=admin,dbUser.password=password \ - oci://registry-1.docker.io/bitnamicharts/cassandra + oci://REGISTRY_NAME/REPOSITORY_NAME/cassandra ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. 
For example, ```console -helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/cassandra +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/cassandra ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. > **Tip**: You can use the default [values.yaml](values.yaml) ## Configuration and installation details @@ -378,9 +387,11 @@ Find more information about how to deal with common errors related to Bitnami's It's necessary to set the `dbUser.password` parameter when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use. Please note down the password and run the command below to upgrade your chart: ```console -helm upgrade my-release oci://registry-1.docker.io/bitnamicharts/cassandra --set dbUser.password=[PASSWORD] +helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/cassandra --set dbUser.password=[PASSWORD] ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + | Note: you need to substitute the placeholder *[PASSWORD]* with the value obtained in the installation notes. ### To 9.0.0 diff --git a/charts/bitnami/cassandra/values.yaml b/charts/bitnami/cassandra/values.yaml index 9b13e634b..9aa7e1432 100644 --- a/charts/bitnami/cassandra/values.yaml +++ b/charts/bitnami/cassandra/values.yaml @@ -65,9 +65,9 @@ diagnosticMode: ## Bitnami Cassandra image ## ref: https://hub.docker.com/r/bitnami/cassandra/tags/ -## @param image.registry Cassandra image registry -## @param image.repository Cassandra image repository -## @param image.tag Cassandra image tag (immutable tags are recommended) +## @param image.registry [default: REGISTRY_NAME] Cassandra image registry +## @param image.repository [default: REPOSITORY_NAME/cassandra] Cassandra image repository +## @skip image.tag Cassandra image tag (immutable tags are recommended) ## @param image.digest Cassandra image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param image.pullPolicy image pull policy ## @param image.pullSecrets Cassandra image pull secrets @@ -287,13 +287,25 @@ podSecurityContext: ## Configure Container Security Context (only main container) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container ## @param containerSecurityContext.enabled Enabled Cassandra containers' Security Context -## @param containerSecurityContext.runAsUser Set Cassandra container's Security Context runAsUser -## @param containerSecurityContext.runAsNonRoot Force the container to be run as non root +## @param containerSecurityContext.runAsUser Set Cassandra containers' Security Context runAsUser +## @param containerSecurityContext.allowPrivilegeEscalation Set Cassandra containers' Security Context allowPrivilegeEscalation +## @param containerSecurityContext.capabilities.drop Set Cassandra containers' Security Context capabilities to be dropped +## @param containerSecurityContext.readOnlyRootFilesystem Set Cassandra containers' Security Context readOnlyRootFilesystem +## @param containerSecurityContext.runAsNonRoot Set Cassandra containers' Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set container's Security Context privileged +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + readOnlyRootFilesystem: false ## Cassandra pods' resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## Minimum memory for development is 4GB and 2 CPU cores @@ -606,9 +618,9 @@ volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume ## enabled: false - ## @param volumePermissions.image.registry Init container volume image registry - ## @param volumePermissions.image.repository Init container volume image repository - ## @param volumePermissions.image.tag Init container volume image tag (immutable tags are recommended) + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume image repository + ## @skip volumePermissions.image.tag Init container volume image tag (immutable tags are recommended) ## @param volumePermissions.image.digest Init container volume image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param volumePermissions.image.pullPolicy Init container volume pull policy ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array @@ -674,9 +686,9 @@ metrics: enabled: false ## Bitnami Cassandra Exporter image ## ref: https://hub.docker.com/r/bitnami/cassandra-exporter/tags/ - ## @param metrics.image.registry Cassandra exporter image registry - ## @param metrics.image.repository Cassandra exporter image name - ## @param metrics.image.tag Cassandra exporter image tag + ## @param metrics.image.registry [default: REGISTRY_NAME] Cassandra exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/cassandra-exporter] Cassandra exporter image name + ## @skip metrics.image.tag Cassandra exporter image tag ## @param metrics.image.digest Cassandra exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag ## @param metrics.image.pullPolicy image pull policy ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array diff --git a/charts/bitnami/kafka/Chart.yaml b/charts/bitnami/kafka/Chart.yaml index 31da33034..dc8fd1bb4 100644 --- a/charts/bitnami/kafka/Chart.yaml +++ b/charts/bitnami/kafka/Chart.yaml @@ -45,4 +45,4 @@ maintainers: name: kafka sources: - https://github.com/bitnami/charts/tree/main/bitnami/kafka -version: 26.2.0 +version: 26.2.1 diff --git a/charts/bitnami/kafka/README.md b/charts/bitnami/kafka/README.md index 426f340f5..63113b605 100644 --- a/charts/bitnami/kafka/README.md +++ b/charts/bitnami/kafka/README.md @@ -22,7 +22,7 @@ This chart bootstraps a [Kafka](https://github.com/bitnami/containers/tree/main/ Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. -Looking to use Apache Kafka in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use Apache Kafka in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. ## Prerequisites @@ -1169,9 +1169,9 @@ The changes introduced in this version are: - TLS settings have been moved from `auth.tls.*` to `tls.*`. - Zookeeper TLS settings have been moved from `auth.zookeeper*` to `tls.zookeeper.*` - Refactor externalAccess to support the new architecture: - - `externalAccess.service.*` have been renamed to `externalAccess.controller.service.*` and `externalAccess.controller.service.*`. - - Controller pods will not configure externalAccess unless: - - `controller.controllerOnly=false` (default), meaning the pods are running as 'controller+broker' nodes. + - `externalAccess.service.*` have been renamed to `externalAccess.controller.service.*` and `externalAccess.broker.service.*`. + - Controller pods will not configure externalAccess unless either: + - `controller.controllerOnly=false` (default), meaning the pods are running as 'controller+broker' nodes; or - `externalAccess.controller.service.forceExpose=true`, for use cases where controller-only nodes want to be exposed externally. 
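As a concrete illustration of the rename described in the corrected bullets above, an `externalAccess` fragment under the new layout could be sketched as follows. Only the `controller.service.*`, `broker.service.*` and `forceExpose` paths come from the README text itself; the remaining keys (`enabled`, `type`) are assumptions shown purely for context.

```yaml
# Hypothetical values fragment after the externalAccess refactor described above.
# Previously: externalAccess.service.*
# Now:        externalAccess.controller.service.* and externalAccess.broker.service.*
externalAccess:
  enabled: true            # assumed top-level toggle, shown for context
  controller:
    service:
      type: LoadBalancer   # assumed service type, shown for context
      # Honoured only when controller.controllerOnly=false (controller+broker nodes),
      # or when forceExpose=true for controller-only nodes.
      forceExpose: false
  broker:
    service:
      type: LoadBalancer   # assumed service type, shown for context
```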
#### Upgrading from Kraft mode diff --git a/charts/bitnami/kafka/values.yaml b/charts/bitnami/kafka/values.yaml index 973082036..efe925033 100644 --- a/charts/bitnami/kafka/values.yaml +++ b/charts/bitnami/kafka/values.yaml @@ -825,7 +825,6 @@ controller: ## broker: ## @param broker.replicaCount Number of Kafka broker-only nodes - ## Ignore this section if running in Zookeeper mode. ## replicaCount: 0 ## @param broker.minId Minimal node.id values for broker-only nodes. Do not change after first initialization. diff --git a/charts/bitnami/mysql/Chart.yaml b/charts/bitnami/mysql/Chart.yaml index 595b953d3..dd2b77a85 100644 --- a/charts/bitnami/mysql/Chart.yaml +++ b/charts/bitnami/mysql/Chart.yaml @@ -36,4 +36,4 @@ maintainers: name: mysql sources: - https://github.com/bitnami/charts/tree/main/bitnami/mysql -version: 9.14.1 +version: 9.14.2 diff --git a/charts/bitnami/mysql/values.yaml b/charts/bitnami/mysql/values.yaml index 8832efb43..f003c56d2 100644 --- a/charts/bitnami/mysql/values.yaml +++ b/charts/bitnami/mysql/values.yaml @@ -204,7 +204,7 @@ primary: ## configuration: |- [mysqld] - default_authentication_plugin={{- .Values.auth.defaultAuthPlugin | default "mysql_native_password" }} + default_authentication_plugin={{- .Values.auth.defaultAuthenticationPlugin | default "mysql_native_password" }} skip-name-resolve explicit_defaults_for_timestamp basedir=/opt/bitnami/mysql diff --git a/charts/bitnami/postgresql/Chart.lock b/charts/bitnami/postgresql/Chart.lock index 5a4cd4087..35f80ca85 100644 --- a/charts/bitnami/postgresql/Chart.lock +++ b/charts/bitnami/postgresql/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.13.2 -digest: sha256:551ae9c020597fd0a1d62967d9899a3c57a12e92f49e7a3967b6a187efdcaead -generated: "2023-10-05T15:32:13.375699946Z" + version: 2.13.3 +digest: sha256:9a971689db0c66ea95ac2e911c05014c2b96c6077c991131ff84f2982f88fb83 +generated: "2023-11-03T20:45:06.276989379Z" diff --git a/charts/bitnami/postgresql/Chart.yaml b/charts/bitnami/postgresql/Chart.yaml index f0580f4a0..3a2e7b422 100644 --- a/charts/bitnami/postgresql/Chart.yaml +++ b/charts/bitnami/postgresql/Chart.yaml @@ -8,9 +8,9 @@ annotations: - name: os-shell image: docker.io/bitnami/os-shell:11-debian-11-r90 - name: postgres-exporter - image: docker.io/bitnami/postgres-exporter:0.14.0-debian-11-r15 + image: docker.io/bitnami/postgres-exporter:0.15.0-debian-11-r0 - name: postgresql - image: docker.io/bitnami/postgresql:16.0.0-debian-11-r13 + image: docker.io/bitnami/postgresql:16.0.0-debian-11-r14 licenses: Apache-2.0 apiVersion: v2 appVersion: 16.0.0 @@ -38,4 +38,4 @@ maintainers: name: postgresql sources: - https://github.com/bitnami/charts/tree/main/bitnami/postgresql -version: 13.1.5 +version: 13.2.1 diff --git a/charts/bitnami/postgresql/README.md b/charts/bitnami/postgresql/README.md index 6bf879d6c..11028d082 100644 --- a/charts/bitnami/postgresql/README.md +++ b/charts/bitnami/postgresql/README.md @@ -11,9 +11,11 @@ Trademarks: This software listing is packaged by Bitnami. The respective tradema ## TL;DR ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/postgresql +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. 
For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + ## Introduction This chart bootstraps a [PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. @@ -22,7 +24,7 @@ For HA, please see [this repo](https://github.com/bitnami/charts/tree/main/bitna Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. -Looking to use PostgreSQL in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use PostgreSQL in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. ## Prerequisites @@ -35,9 +37,11 @@ Looking to use PostgreSQL in production? Try [VMware Application Catalog](https: To install the chart with the release name `my-release`: ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/postgresql +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. > **Tip**: List all releases using `helm list` @@ -96,65 +100,64 @@ kubectl delete pvc -l release=my-release ### PostgreSQL common parameters -| Name | Description | Value | -| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `image.registry` | PostgreSQL image registry | `docker.io` | -| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` | -| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `16.0.0-debian-11-r13` | -| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify image pull secrets | `[]` | -| `image.debug` | Specify if debug values should be set | `false` | -| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` | -| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided | `""` | -| `auth.username` | Name for a custom user to create | `""` | -| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` is provided | `""` | -| `auth.database` | Name for a custom database to create | `""` | -| `auth.replicationUsername` | Name of the replication user | `repl_user` | -| `auth.replicationPassword` | Password for the replication user. 
Ignored if `auth.existingSecret` is provided | `""` | -| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. | `""` | -| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` | -| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` | -| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `replication-password` | -| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `false` | -| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` | -| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | -| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` | -| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | -| `containerPorts.postgresql` | PostgreSQL container port | `5432` | -| `audit.logHostname` | Log client hostnames | `false` | -| `audit.logConnections` | Add client log-in operations to the log file | `false` | -| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | -| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` | -| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` | -| `audit.clientMinMessages` | Message log level to share with the user | `error` | -| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` | -| `audit.logTimezone` | Timezone for the log timestamps | `""` | -| `ldap.enabled` | Enable LDAP support | `false` | -| `ldap.server` | IP address or name of the LDAP server. | `""` | -| `ldap.port` | Port number on the LDAP server to connect to | `""` | -| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` | -| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` | -| `ldap.basedn` | Root DN to begin the search for the user in | `""` | -| `ldap.binddn` | DN of user to bind to LDAP | `""` | -| `ldap.bindpw` | Password for the user to bind to LDAP | `""` | -| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` | -| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` | -| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` | -| `ldap.tls.enabled` | Se to true to enable TLS encryption | `false` | -| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. 
| `""` | -| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` | -| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | -| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` | -| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` | -| `tls.enabled` | Enable TLS traffic support | `false` | -| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | -| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | -| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | -| `tls.certFilename` | Certificate filename | `""` | -| `tls.certKeyFilename` | Certificate key filename | `""` | -| `tls.certCAFilename` | CA Certificate filename | `""` | -| `tls.crlFilename` | File containing a Certificate Revocation List | `""` | +| Name | Description | Value | +| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | +| `image.registry` | PostgreSQL image registry | `REGISTRY_NAME` | +| `image.repository` | PostgreSQL image repository | `REPOSITORY_NAME/postgresql` | +| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` | +| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided | `""` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` is provided | `""` | +| `auth.database` | Name for a custom database to create | `""` | +| `auth.replicationUsername` | Name of the replication user | `repl_user` | +| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` is provided | `""` | +| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. | `""` | +| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` | +| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` | +| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. 
| `replication-password` | +| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `false` | +| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `containerPorts.postgresql` | PostgreSQL container port | `5432` | +| `audit.logHostname` | Log client hostnames | `false` | +| `audit.logConnections` | Add client log-in operations to the log file | `false` | +| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | +| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` | +| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` | +| `audit.clientMinMessages` | Message log level to share with the user | `error` | +| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` | +| `audit.logTimezone` | Timezone for the log timestamps | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.server` | IP address or name of the LDAP server. | `""` | +| `ldap.port` | Port number on the LDAP server to connect to | `""` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` | +| `ldap.basedn` | Root DN to begin the search for the user in | `""` | +| `ldap.binddn` | DN of user to bind to LDAP | `""` | +| `ldap.bindpw` | Password for the user to bind to LDAP | `""` | +| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` | +| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` | +| `ldap.tls.enabled` | Se to true to enable TLS encryption | `false` | +| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. 
| `""` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` | +| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` | +| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.crlFilename` | File containing a Certificate Revocation List | `""` | ### PostgreSQL Primary parameters @@ -208,13 +211,14 @@ kubectl delete pvc -l release=my-release | `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` | | `primary.podSecurityContext.enabled` | Enable security context | `true` | | `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | -| `primary.containerSecurityContext.enabled` | Enable container security context | `true` | -| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` | -| `primary.containerSecurityContext.runAsGroup` | Group ID for the container | `0` | -| `primary.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot for the container | `true` | -| `primary.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation for the container | `false` | -| `primary.containerSecurityContext.seccompProfile.type` | Set seccompProfile.type for the container | `RuntimeDefault` | -| `primary.containerSecurityContext.capabilities.drop` | Set capabilities.drop for the container | `["ALL"]` | +| `primary.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `primary.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `primary.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `primary.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `primary.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `primary.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `primary.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `primary.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | | `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` | | `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` | | `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` | @@ -307,13 +311,14 @@ kubectl delete pvc -l release=my-release | `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` | | `readReplicas.podSecurityContext.enabled` | Enable security context | `true` | | 
`readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | -| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` | -| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` | -| `readReplicas.containerSecurityContext.runAsGroup` | Group ID for the container | `0` | -| `readReplicas.containerSecurityContext.runAsNonRoot` | Set runAsNonRoot for the container | `true` | -| `readReplicas.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation for the container | `false` | -| `readReplicas.containerSecurityContext.seccompProfile.type` | Set seccompProfile.type for the container | `RuntimeDefault` | -| `readReplicas.containerSecurityContext.capabilities.drop` | Set capabilities.drop for the container | `["ALL"]` | +| `readReplicas.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `readReplicas.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `readReplicas.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `readReplicas.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `readReplicas.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `readReplicas.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `readReplicas.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `readReplicas.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | | `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` | | `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` | | `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` | @@ -382,14 +387,14 @@ kubectl delete pvc -l release=my-release | `backup.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` | | `backup.cronjob.podSecurityContext.enabled` | Enable PodSecurityContext for CronJob/Backup | `true` | | `backup.cronjob.podSecurityContext.fsGroup` | Group ID for the CronJob | `1001` | -| `backup.cronjob.containerSecurityContext.enabled` | Enable container security context | `true` | -| `backup.cronjob.containerSecurityContext.runAsUser` | User ID for the backup container | `1001` | -| `backup.cronjob.containerSecurityContext.runAsGroup` | Group ID for the backup container | `0` | -| `backup.cronjob.containerSecurityContext.runAsNonRoot` | Set backup container's Security Context runAsNonRoot | `true` | -| `backup.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Is the container itself readonly | `true` | -| `backup.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate backup pod(s) privileges | `false` | -| `backup.cronjob.containerSecurityContext.seccompProfile.type` | Set backup container's Security Context seccompProfile type | `RuntimeDefault` | -| `backup.cronjob.containerSecurityContext.capabilities.drop` | Set backup container's Security Context capabilities to drop | `["ALL"]` | +| `backup.cronjob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| 
`backup.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `backup.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `backup.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `backup.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `backup.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `backup.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `backup.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | | `backup.cronjob.command` | Set backup container's command to run | `["/bin/sh","-c","pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump"]` | | `backup.cronjob.labels` | Set the cronjob labels | `{}` | | `backup.cronjob.annotations` | Set the cronjob annotations | `{}` | @@ -425,21 +430,20 @@ kubectl delete pvc -l release=my-release ### Volume Permissions parameters -| Name | Description | Value | -| ---------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r90` | -| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | -| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | -| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | -| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | -| `volumePermissions.containerSecurityContext.runAsGroup` | Group ID for the init container | `0` | -| `volumePermissions.containerSecurityContext.runAsNonRoot` | runAsNonRoot for the init container | `false` | -| `volumePermissions.containerSecurityContext.seccompProfile.type` | seccompProfile.type for the init container | `RuntimeDefault` | +| Name | Description | Value | +| ---------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | +| `volumePermissions.containerSecurityContext.runAsGroup` | Group ID for the init container | `0` | +| `volumePermissions.containerSecurityContext.runAsNonRoot` | runAsNonRoot for the init container | `false` | +| `volumePermissions.containerSecurityContext.seccompProfile.type` | seccompProfile.type for the init container | `RuntimeDefault` | ### Other Parameters @@ -456,75 +460,77 @@ kubectl delete pvc -l release=my-release ### Metrics Parameters -| Name | Description | Value | -| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------------- | -| `metrics.enabled` | Start a prometheus exporter | `false` | -| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` | -| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` | -| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.14.0-debian-11-r15` | -| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` | -| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` | -| `metrics.customMetrics` | Define additional custom metrics | `{}` | -| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` | -| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` | -| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` | -| `metrics.containerSecurityContext.runAsGroup` | Set PostgreSQL Prometheus exporter containers' Security Context runAsGroup | `0` | -| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` | -| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set PostgreSQL Prometheus exporter containers' Security Context allowPrivilegeEscalation | `false` | -| `metrics.containerSecurityContext.seccompProfile.type` | Set PostgreSQL Prometheus exporter containers' Security Context seccompProfile.type | `RuntimeDefault` | -| `metrics.containerSecurityContext.capabilities.drop` | Set PostgreSQL Prometheus exporter containers' Security Context capabilities.drop | `["ALL"]` | -| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` | -| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | -| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` | -| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` | -| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | -| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` | -| `metrics.resources.limits` | The 
resources limits for the PostgreSQL Prometheus exporter container | `{}` | -| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` | -| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` | -| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | -| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | -| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | -| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | -| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | -| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | -| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | -| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | +| Name | Description | Value | +| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `REGISTRY_NAME` | +| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `REPOSITORY_NAME/postgres-exporter` | +| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` | +| `metrics.customMetrics` | Define additional custom metrics | `{}` | +| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `metrics.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `metrics.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` | +| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` | +| 
`metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` | +| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` | +| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, ```console helm install my-release \ --set auth.postgresPassword=secretpassword - oci://registry-1.docker.io/bitnamicharts/postgresql + oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + The above command sets the PostgreSQL `postgres` account password to `secretpassword`. > NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. @@ -533,9 +539,10 @@ The above command sets the PostgreSQL `postgres` account password to `secretpass Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, ```console -helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/postgresql +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. > **Tip**: You can use the default [values.yaml](values.yaml) ## Configuration and installation details diff --git a/charts/bitnami/postgresql/charts/common/Chart.yaml b/charts/bitnami/postgresql/charts/common/Chart.yaml index 961b90f48..40cd22d77 100644 --- a/charts/bitnami/postgresql/charts/common/Chart.yaml +++ b/charts/bitnami/postgresql/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.13.2 +appVersion: 2.13.3 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.13.2 +version: 2.13.3 diff --git a/charts/bitnami/postgresql/charts/common/README.md b/charts/bitnami/postgresql/charts/common/README.md index fe6a01000..80da4cc2f 100644 --- a/charts/bitnami/postgresql/charts/common/README.md +++ b/charts/bitnami/postgresql/charts/common/README.md @@ -34,8 +34,8 @@ Looking to use our applications in production? Try [VMware Application Catalog]( ## Prerequisites -- Kubernetes 1.19+ -- Helm 3.2.0+ +- Kubernetes 1.23+ +- Helm 3.8.0+ ## Parameters diff --git a/charts/bitnami/postgresql/charts/common/templates/_capabilities.tpl b/charts/bitnami/postgresql/charts/common/templates/_capabilities.tpl index b1257397d..115674af8 100644 --- a/charts/bitnami/postgresql/charts/common/templates/_capabilities.tpl +++ b/charts/bitnami/postgresql/charts/common/templates/_capabilities.tpl @@ -184,7 +184,7 @@ Returns true if PodSecurityPolicy is supported {{/* Returns true if AdmissionConfiguration is supported */}} -{{- define "common.capabilities.admisionConfiguration.supported" -}} +{{- define "common.capabilities.admissionConfiguration.supported" -}} {{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}} {{- true -}} {{- end -}} @@ -193,7 +193,7 @@ Returns true if AdmissionConfiguration is supported {{/* Return the appropriate apiVersion for AdmissionConfiguration. */}} -{{- define "common.capabilities.admisionConfiguration.apiVersion" -}} +{{- define "common.capabilities.admissionConfiguration.apiVersion" -}} {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} {{- print "apiserver.config.k8s.io/v1alpha1" -}} {{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) 
-}} diff --git a/charts/bitnami/postgresql/values.yaml b/charts/bitnami/postgresql/values.yaml index 1c371fde7..ff3cfbdfc 100644 --- a/charts/bitnami/postgresql/values.yaml +++ b/charts/bitnami/postgresql/values.yaml @@ -87,9 +87,9 @@ diagnosticMode: ## Bitnami PostgreSQL image version ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ -## @param image.registry PostgreSQL image registry -## @param image.repository PostgreSQL image repository -## @param image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.registry [default: REGISTRY_NAME] PostgreSQL image registry +## @param image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository +## @skip image.tag PostgreSQL image tag (immutable tags are recommended) ## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag ## @param image.pullPolicy PostgreSQL image pull policy ## @param image.pullSecrets Specify image pull secrets @@ -98,7 +98,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/postgresql - tag: 16.0.0-debian-11-r13 + tag: 16.0.0-debian-11-r14 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -458,25 +458,26 @@ primary: fsGroup: 1001 ## Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param primary.containerSecurityContext.enabled Enable container security context - ## @param primary.containerSecurityContext.runAsUser User ID for the container - ## @param primary.containerSecurityContext.runAsGroup Group ID for the container - ## @param primary.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container - ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container - ## @param primary.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container - ## @param primary.containerSecurityContext.capabilities.drop Set capabilities.drop for the container + ## @param primary.containerSecurityContext.enabled Enabled containers' Security Context + ## @param primary.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param primary.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param primary.containerSecurityContext.privileged Set container's Security Context privileged + ## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 - runAsGroup: 0 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault capabilities: - drop: - - ALL + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param primary.hostAliases PostgreSQL primary pods host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## @@ -821,25 +822,26 @@ readReplicas: fsGroup: 1001 ## Container Security Context ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param readReplicas.containerSecurityContext.enabled Enable container security context - ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container - ## @param readReplicas.containerSecurityContext.runAsGroup Group ID for the container - ## @param readReplicas.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container - ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container - ## @param readReplicas.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container - ## @param readReplicas.containerSecurityContext.capabilities.drop Set capabilities.drop for the container + ## @param readReplicas.containerSecurityContext.enabled Enabled containers' Security Context + ## @param readReplicas.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param readReplicas.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param readReplicas.containerSecurityContext.privileged Set container's Security Context privileged + ## @param readReplicas.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param readReplicas.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param readReplicas.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 - runAsGroup: 0 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault capabilities: - drop: - - ALL + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## @@ -1108,26 +1110,25 @@ backup: fsGroup: 1001 ## backup container's Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param backup.cronjob.containerSecurityContext.enabled Enable container security context - ## @param backup.cronjob.containerSecurityContext.runAsUser User ID for the backup container - ## @param backup.cronjob.containerSecurityContext.runAsGroup Group ID for the backup container - ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set backup container's Security Context runAsNonRoot - ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Is the container itself readonly - ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate backup pod(s) privileges - ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set backup container's Security Context seccompProfile type - ## @param backup.cronjob.containerSecurityContext.capabilities.drop Set backup container's Security Context capabilities to drop + ## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set 
container's Security Context runAsNonRoot + ## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile containerSecurityContext: enabled: true runAsUser: 1001 - runAsGroup: 0 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault capabilities: - drop: - - ALL + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param backup.cronjob.command Set backup container's command to run command: - /bin/sh @@ -1289,9 +1290,9 @@ volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume ## enabled: false - ## @param volumePermissions.image.registry Init container volume-permissions image registry - ## @param volumePermissions.image.repository Init container volume-permissions image repository - ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets @@ -1390,9 +1391,9 @@ metrics: ## @param metrics.enabled Start a prometheus exporter ## enabled: false - ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry - ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository - ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.registry [default: REGISTRY_NAME] PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/postgres-exporter] PostgreSQL Prometheus Exporter image repository + ## @skip metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy ## @param metrics.image.pullSecrets Specify image pull secrets @@ -1400,7 +1401,7 @@ metrics: image: registry: docker.io repository: bitnami/postgres-exporter - tag: 0.14.0-debian-11-r15 + tag: 0.15.0-debian-11-r0 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
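
The hunks above converge the primary, read-replica, and backup cronjob pods on one hardened container security context: `runAsGroup` is dropped, while `privileged`, `readOnlyRootFilesystem`, `allowPrivilegeEscalation`, `capabilities.drop: ["ALL"]`, and a `RuntimeDefault` seccomp profile become explicit defaults. As a reference when writing overrides, here is a minimal sketch of the resulting defaults for the primary pod, using only the field names and values shown in this diff (the same shape applies to `readReplicas.*` and `backup.cronjob.*`):

```yaml
# Sketch of the new chart defaults for the primary container security context,
# taken from the containerSecurityContext block introduced in this version.
primary:
  containerSecurityContext:
    enabled: true
    runAsUser: 1001
    runAsNonRoot: true
    privileged: false
    readOnlyRootFilesystem: false
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: RuntimeDefault
```

Such an override file can be applied with `helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/postgresql -f values-override.yaml`, substituting the placeholders as described in the README note above.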
@@ -1435,25 +1436,26 @@ metrics: extraEnvVars: [] ## PostgreSQL Prometheus exporter containers' Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context - ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsGroup Set PostgreSQL Prometheus exporter containers' Security Context runAsGroup - ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot - ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set PostgreSQL Prometheus exporter containers' Security Context allowPrivilegeEscalation - ## @param metrics.containerSecurityContext.seccompProfile.type Set PostgreSQL Prometheus exporter containers' Security Context seccompProfile.type - ## @param metrics.containerSecurityContext.capabilities.drop Set PostgreSQL Prometheus exporter containers' Security Context capabilities.drop + ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged + ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 - runAsGroup: 0 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault capabilities: - drop: - - ALL + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers diff --git a/charts/bitnami/redis/Chart.yaml b/charts/bitnami/redis/Chart.yaml index 1f8149ae3..1e15beb3c 100644 --- a/charts/bitnami/redis/Chart.yaml +++ b/charts/bitnami/redis/Chart.yaml @@ -10,12 +10,12 @@ annotations: - name: redis-exporter image: docker.io/bitnami/redis-exporter:1.55.0-debian-11-r0 - name: redis-sentinel - image: docker.io/bitnami/redis-sentinel:7.2.2-debian-11-r0 + image: docker.io/bitnami/redis-sentinel:7.2.3-debian-11-r0 - name: redis - image: docker.io/bitnami/redis:7.2.2-debian-11-r0 + image: docker.io/bitnami/redis:7.2.3-debian-11-r0 licenses: Apache-2.0 apiVersion: v2 -appVersion: 7.2.2 +appVersion: 7.2.3 dependencies: - name: common repository: file://./charts/common @@ -37,4 +37,4 @@ maintainers: name: redis sources: - https://github.com/bitnami/charts/tree/main/bitnami/redis -version: 18.2.0 
+version: 18.2.1 diff --git a/charts/bitnami/redis/README.md b/charts/bitnami/redis/README.md index 81f6c6705..c629ac0fc 100644 --- a/charts/bitnami/redis/README.md +++ b/charts/bitnami/redis/README.md @@ -1015,4 +1015,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. +limitations under the License. \ No newline at end of file diff --git a/charts/bitnami/redis/values.yaml b/charts/bitnami/redis/values.yaml index 38147b567..0c09949d1 100644 --- a/charts/bitnami/redis/values.yaml +++ b/charts/bitnami/redis/values.yaml @@ -91,7 +91,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/redis - tag: 7.2.2-debian-11-r0 + tag: 7.2.3-debian-11-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1056,7 +1056,7 @@ sentinel: image: registry: docker.io repository: bitnami/redis-sentinel - tag: 7.2.2-debian-11-r0 + tag: 7.2.3-debian-11-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' diff --git a/charts/bitnami/spark/Chart.yaml b/charts/bitnami/spark/Chart.yaml index 35858cdb6..9cd944024 100644 --- a/charts/bitnami/spark/Chart.yaml +++ b/charts/bitnami/spark/Chart.yaml @@ -6,7 +6,7 @@ annotations: category: Infrastructure images: | - name: spark - image: docker.io/bitnami/spark:3.5.0-debian-11-r10 + image: docker.io/bitnami/spark:3.5.0-debian-11-r12 licenses: Apache-2.0 apiVersion: v2 appVersion: 3.5.0 @@ -30,4 +30,4 @@ maintainers: name: spark sources: - https://github.com/bitnami/charts/tree/main/bitnami/spark -version: 8.0.2 +version: 8.1.0 diff --git a/charts/bitnami/spark/README.md b/charts/bitnami/spark/README.md index c2bfe2eda..77ea1646d 100644 --- a/charts/bitnami/spark/README.md +++ b/charts/bitnami/spark/README.md @@ -24,7 +24,7 @@ Apache Spark includes APIs for Java, Python, Scala and R. Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. -Looking to use Apache Spark in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use Apache Spark in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. 
## Prerequisites @@ -98,156 +98,162 @@ The command removes all the Kubernetes components associated with the chart and ### Spark master parameters -| Name | Description | Value | -| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- | -| `master.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for master | `""` | -| `master.containerPorts.http` | Specify the port where the web interface will listen on the master over HTTP | `8080` | -| `master.containerPorts.https` | Specify the port where the web interface will listen on the master over HTTPS | `8480` | -| `master.containerPorts.cluster` | Specify the port where the master listens to communicate with workers | `7077` | -| `master.hostAliases` | Deployment pod host aliases | `[]` | -| `master.extraContainerPorts` | Specify the port where the running jobs inside the masters listens | `[]` | -| `master.daemonMemoryLimit` | Set the memory limit for the master daemon | `""` | -| `master.configOptions` | Use a string to set the config options for in the form "-Dx=y" | `""` | -| `master.extraEnvVars` | Extra environment variables to pass to the master container | `[]` | -| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for master nodes | `""` | -| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for master nodes | `""` | -| `master.podSecurityContext.enabled` | Enable security context | `true` | -| `master.podSecurityContext.fsGroup` | Set master pod's Security Context Group ID | `1001` | -| `master.podSecurityContext.runAsUser` | Set master pod's Security Context User ID | `1001` | -| `master.podSecurityContext.runAsGroup` | Set master pod's Security Context Group ID | `0` | -| `master.podSecurityContext.seLinuxOptions` | Set master pod's Security Context SELinux options | `{}` | -| `master.containerSecurityContext.enabled` | Enabled master containers' Security Context | `true` | -| `master.containerSecurityContext.runAsUser` | Set master containers' Security Context runAsUser | `1001` | -| `master.containerSecurityContext.runAsNonRoot` | Set master containers' Security Context runAsNonRoot | `true` | -| `master.containerSecurityContext.readOnlyRootFilesystem` | Set master containers' Security Context runAsNonRoot | `false` | -| `master.command` | Override default container command (useful when using custom images) | `[]` | -| `master.args` | Override default container args (useful when using custom images) | `[]` | -| `master.podAnnotations` | Annotations for pods in StatefulSet | `{}` | -| `master.podLabels` | Extra labels for pods in StatefulSet | `{}` | -| `master.podAffinityPreset` | Spark master pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `master.podAntiAffinityPreset` | Spark master pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `master.nodeAffinityPreset.type` | Spark master node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `master.nodeAffinityPreset.key` | Spark master node label key to match Ignored if `master.affinity` is set. | `""` | -| `master.nodeAffinityPreset.values` | Spark master node label values to match. Ignored if `master.affinity` is set. 
| `[]` | -| `master.affinity` | Spark master affinity for pod assignment | `{}` | -| `master.nodeSelector` | Spark master node labels for pod assignment | `{}` | -| `master.tolerations` | Spark master tolerations for pod assignment | `[]` | -| `master.updateStrategy.type` | Master statefulset strategy type. | `RollingUpdate` | -| `master.priorityClassName` | master pods' priorityClassName | `""` | -| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `master.schedulerName` | Name of the k8s scheduler (other than default) for master pods | `""` | -| `master.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | -| `master.lifecycleHooks` | for the master container(s) to automate configuration before or after startup | `{}` | -| `master.extraVolumes` | Optionally specify extra list of additional volumes for the master pod(s) | `[]` | -| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the master container(s) | `[]` | -| `master.extraVolumeClaimTemplates` | Optionally specify extra list of volumesClaimTemplates for the master statefulset | `[]` | -| `master.resources.limits` | The resources limits for the container | `{}` | -| `master.resources.requests` | The requested resources for the container | `{}` | -| `master.livenessProbe.enabled` | Enable livenessProbe | `true` | -| `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | -| `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | -| `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `master.readinessProbe.enabled` | Enable readinessProbe | `true` | -| `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | -| `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `master.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `master.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `master.startupProbe.enabled` | Enable startupProbe | `false` | -| `master.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `master.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `master.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | -| `master.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | -| `master.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `master.sidecars` | Add additional sidecar containers to the master pod(s) | `[]` | -| `master.initContainers` | Add initContainers to the master pods. 
| `[]` | +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ---------------- | +| `master.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for master | `""` | +| `master.containerPorts.http` | Specify the port where the web interface will listen on the master over HTTP | `8080` | +| `master.containerPorts.https` | Specify the port where the web interface will listen on the master over HTTPS | `8480` | +| `master.containerPorts.cluster` | Specify the port where the master listens to communicate with workers | `7077` | +| `master.hostAliases` | Deployment pod host aliases | `[]` | +| `master.extraContainerPorts` | Specify the port where the running jobs inside the masters listens | `[]` | +| `master.daemonMemoryLimit` | Set the memory limit for the master daemon | `""` | +| `master.configOptions` | Use a string to set the config options for in the form "-Dx=y" | `""` | +| `master.extraEnvVars` | Extra environment variables to pass to the master container | `[]` | +| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for master nodes | `""` | +| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for master nodes | `""` | +| `master.podSecurityContext.enabled` | Enable security context | `true` | +| `master.podSecurityContext.fsGroup` | Set master pod's Security Context Group ID | `1001` | +| `master.podSecurityContext.runAsUser` | Set master pod's Security Context User ID | `1001` | +| `master.podSecurityContext.runAsGroup` | Set master pod's Security Context Group ID | `0` | +| `master.podSecurityContext.seLinuxOptions` | Set master pod's Security Context SELinux options | `{}` | +| `master.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `master.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `master.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `master.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `master.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `master.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `master.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `master.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `master.command` | Override default container command (useful when using custom images) | `[]` | +| `master.args` | Override default container args (useful when using custom images) | `[]` | +| `master.podAnnotations` | Annotations for pods in StatefulSet | `{}` | +| `master.podLabels` | Extra labels for pods in StatefulSet | `{}` | +| `master.podAffinityPreset` | Spark master pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Spark master pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `master.nodeAffinityPreset.type` | Spark master node affinity preset type. Ignored if `master.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Spark master node label key to match Ignored if `master.affinity` is set. | `""` | +| `master.nodeAffinityPreset.values` | Spark master node label values to match. Ignored if `master.affinity` is set. | `[]` | +| `master.affinity` | Spark master affinity for pod assignment | `{}` | +| `master.nodeSelector` | Spark master node labels for pod assignment | `{}` | +| `master.tolerations` | Spark master tolerations for pod assignment | `[]` | +| `master.updateStrategy.type` | Master statefulset strategy type. | `RollingUpdate` | +| `master.priorityClassName` | master pods' priorityClassName | `""` | +| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `master.schedulerName` | Name of the k8s scheduler (other than default) for master pods | `""` | +| `master.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `master.lifecycleHooks` | for the master container(s) to automate configuration before or after startup | `{}` | +| `master.extraVolumes` | Optionally specify extra list of additional volumes for the master pod(s) | `[]` | +| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the master container(s) | `[]` | +| `master.extraVolumeClaimTemplates` | Optionally specify extra list of volumesClaimTemplates for the master statefulset | `[]` | +| `master.resources.limits` | The resources limits for the container | `{}` | +| `master.resources.requests` | The requested resources for the container | `{}` | +| `master.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `master.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `master.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `master.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `master.startupProbe.enabled` | Enable startupProbe | `false` | +| `master.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `master.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `master.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `master.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `master.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `master.sidecars` | Add 
additional sidecar containers to the master pod(s) | `[]` | +| `master.initContainers` | Add initContainers to the master pods. | `[]` | ### Spark worker parameters -| Name | Description | Value | -| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- | -| `worker.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for workers | `""` | -| `worker.containerPorts.http` | Specify the port where the web interface will listen on the worker over HTTP | `8080` | -| `worker.containerPorts.https` | Specify the port where the web interface will listen on the worker over HTTPS | `8480` | -| `worker.containerPorts.cluster` | Specify the port where the worker listens to communicate with workers | `""` | -| `worker.hostAliases` | Add deployment host aliases | `[]` | -| `worker.extraContainerPorts` | Specify the port where the running jobs inside the workers listens | `[]` | -| `worker.daemonMemoryLimit` | Set the memory limit for the worker daemon | `""` | -| `worker.memoryLimit` | Set the maximum memory the worker is allowed to use | `""` | -| `worker.coreLimit` | Se the maximum number of cores that the worker can use | `""` | -| `worker.dir` | Set a custom working directory for the application | `""` | -| `worker.javaOptions` | Set options for the JVM in the form `-Dx=y` | `""` | -| `worker.configOptions` | Set extra options to configure the worker in the form `-Dx=y` | `""` | -| `worker.extraEnvVars` | An array to add extra env vars | `[]` | -| `worker.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for worker nodes | `""` | -| `worker.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for worker nodes | `""` | -| `worker.replicaCount` | Number of spark workers (will be the minimum number when autoscaling is enabled) | `2` | -| `worker.podSecurityContext.enabled` | Enable security context | `true` | -| `worker.podSecurityContext.fsGroup` | Group ID for the container | `1001` | -| `worker.podSecurityContext.runAsUser` | User ID for the container | `1001` | -| `worker.podSecurityContext.runAsGroup` | Group ID for the container | `0` | -| `worker.podSecurityContext.seLinuxOptions` | SELinux options for the container | `{}` | -| `worker.containerSecurityContext.enabled` | Enabled worker containers' Security Context | `true` | -| `worker.containerSecurityContext.runAsUser` | Set worker containers' Security Context runAsUser | `1001` | -| `worker.containerSecurityContext.runAsNonRoot` | Set worker containers' Security Context runAsNonRoot | `true` | -| `worker.containerSecurityContext.readOnlyRootFilesystem` | Set worker containers' Security Context runAsNonRoot | `false` | -| `worker.command` | Override default container command (useful when using custom images) | `[]` | -| `worker.args` | Override default container args (useful when using custom images) | `[]` | -| `worker.podAnnotations` | Annotations for pods in StatefulSet | `{}` | -| `worker.podLabels` | Extra labels for pods in StatefulSet | `{}` | -| `worker.podAffinityPreset` | Spark worker pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `worker.podAntiAffinityPreset` | Spark worker pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `worker.nodeAffinityPreset.type` | Spark worker node affinity preset type. 
Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `worker.nodeAffinityPreset.key` | Spark worker node label key to match Ignored if `worker.affinity` is set. | `""` | -| `worker.nodeAffinityPreset.values` | Spark worker node label values to match. Ignored if `worker.affinity` is set. | `[]` | -| `worker.affinity` | Spark worker affinity for pod assignment | `{}` | -| `worker.nodeSelector` | Spark worker node labels for pod assignment | `{}` | -| `worker.tolerations` | Spark worker tolerations for pod assignment | `[]` | -| `worker.updateStrategy.type` | Worker statefulset strategy type. | `RollingUpdate` | -| `worker.podManagementPolicy` | Statefulset Pod Management Policy Type | `OrderedReady` | -| `worker.priorityClassName` | worker pods' priorityClassName | `""` | -| `worker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `worker.schedulerName` | Name of the k8s scheduler (other than default) for worker pods | `""` | -| `worker.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | -| `worker.lifecycleHooks` | for the worker container(s) to automate configuration before or after startup | `{}` | -| `worker.extraVolumes` | Optionally specify extra list of additional volumes for the worker pod(s) | `[]` | -| `worker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the master container(s) | `[]` | -| `worker.extraVolumeClaimTemplates` | Optionally specify extra list of volumesClaimTemplates for the worker statefulset | `[]` | -| `worker.resources.limits` | The resources limits for the container | `{}` | -| `worker.resources.requests` | The requested resources for the container | `{}` | -| `worker.livenessProbe.enabled` | Enable livenessProbe | `true` | -| `worker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | -| `worker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | -| `worker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `worker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `worker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `worker.readinessProbe.enabled` | Enable readinessProbe | `true` | -| `worker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | -| `worker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `worker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `worker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `worker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `worker.startupProbe.enabled` | Enable startupProbe | `true` | -| `worker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `worker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `worker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | -| `worker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | -| `worker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `worker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `worker.customReadinessProbe` | Custom readinessProbe that overrides the default one | 
`{}` | -| `worker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `worker.sidecars` | Add additional sidecar containers to the worker pod(s) | `[]` | -| `worker.initContainers` | Add initContainers to the worker pods. | `[]` | -| `worker.autoscaling.enabled` | Enable replica autoscaling depending on CPU | `false` | -| `worker.autoscaling.minReplicas` | Minimum number of worker replicas | `""` | -| `worker.autoscaling.maxReplicas` | Maximum number of worker replicas | `5` | -| `worker.autoscaling.targetCPU` | Target CPU utilization percentage | `50` | -| `worker.autoscaling.targetMemory` | Target Memory utilization percentage | `""` | +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ---------------- | +| `worker.existingConfigmap` | The name of an existing ConfigMap with your custom configuration for workers | `""` | +| `worker.containerPorts.http` | Specify the port where the web interface will listen on the worker over HTTP | `8080` | +| `worker.containerPorts.https` | Specify the port where the web interface will listen on the worker over HTTPS | `8480` | +| `worker.containerPorts.cluster` | Specify the port where the worker listens to communicate with workers | `""` | +| `worker.hostAliases` | Add deployment host aliases | `[]` | +| `worker.extraContainerPorts` | Specify the port where the running jobs inside the workers listens | `[]` | +| `worker.daemonMemoryLimit` | Set the memory limit for the worker daemon | `""` | +| `worker.memoryLimit` | Set the maximum memory the worker is allowed to use | `""` | +| `worker.coreLimit` | Se the maximum number of cores that the worker can use | `""` | +| `worker.dir` | Set a custom working directory for the application | `""` | +| `worker.javaOptions` | Set options for the JVM in the form `-Dx=y` | `""` | +| `worker.configOptions` | Set extra options to configure the worker in the form `-Dx=y` | `""` | +| `worker.extraEnvVars` | An array to add extra env vars | `[]` | +| `worker.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for worker nodes | `""` | +| `worker.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for worker nodes | `""` | +| `worker.replicaCount` | Number of spark workers (will be the minimum number when autoscaling is enabled) | `2` | +| `worker.podSecurityContext.enabled` | Enable security context | `true` | +| `worker.podSecurityContext.fsGroup` | Group ID for the container | `1001` | +| `worker.podSecurityContext.seLinuxOptions` | SELinux options for the container | `{}` | +| `worker.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `worker.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `worker.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `worker.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `worker.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `worker.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `worker.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| 
`worker.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `worker.command` | Override default container command (useful when using custom images) | `[]` | +| `worker.args` | Override default container args (useful when using custom images) | `[]` | +| `worker.podAnnotations` | Annotations for pods in StatefulSet | `{}` | +| `worker.podLabels` | Extra labels for pods in StatefulSet | `{}` | +| `worker.podAffinityPreset` | Spark worker pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `worker.podAntiAffinityPreset` | Spark worker pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `worker.nodeAffinityPreset.type` | Spark worker node affinity preset type. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `worker.nodeAffinityPreset.key` | Spark worker node label key to match Ignored if `worker.affinity` is set. | `""` | +| `worker.nodeAffinityPreset.values` | Spark worker node label values to match. Ignored if `worker.affinity` is set. | `[]` | +| `worker.affinity` | Spark worker affinity for pod assignment | `{}` | +| `worker.nodeSelector` | Spark worker node labels for pod assignment | `{}` | +| `worker.tolerations` | Spark worker tolerations for pod assignment | `[]` | +| `worker.updateStrategy.type` | Worker statefulset strategy type. | `RollingUpdate` | +| `worker.podManagementPolicy` | Statefulset Pod Management Policy Type | `OrderedReady` | +| `worker.priorityClassName` | worker pods' priorityClassName | `""` | +| `worker.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `worker.schedulerName` | Name of the k8s scheduler (other than default) for worker pods | `""` | +| `worker.terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `worker.lifecycleHooks` | for the worker container(s) to automate configuration before or after startup | `{}` | +| `worker.extraVolumes` | Optionally specify extra list of additional volumes for the worker pod(s) | `[]` | +| `worker.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the master container(s) | `[]` | +| `worker.extraVolumeClaimTemplates` | Optionally specify extra list of volumesClaimTemplates for the worker statefulset | `[]` | +| `worker.resources.limits` | The resources limits for the container | `{}` | +| `worker.resources.requests` | The requested resources for the container | `{}` | +| `worker.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `worker.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | +| `worker.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `worker.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `worker.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `worker.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `worker.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `worker.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `worker.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `worker.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `worker.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `worker.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `worker.startupProbe.enabled` | Enable startupProbe | `true` | +| `worker.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `worker.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `worker.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `worker.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | +| `worker.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `worker.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `worker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `worker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `worker.sidecars` | Add additional sidecar containers to the worker pod(s) | `[]` | +| `worker.initContainers` | Add initContainers to the worker pods. 
| `[]` | +| `worker.autoscaling.enabled` | Enable replica autoscaling depending on CPU | `false` | +| `worker.autoscaling.minReplicas` | Minimum number of worker replicas | `""` | +| `worker.autoscaling.maxReplicas` | Maximum number of worker replicas | `5` | +| `worker.autoscaling.targetCPU` | Target CPU utilization percentage | `50` | +| `worker.autoscaling.targetMemory` | Target Memory utilization percentage | `""` | ### Security parameters diff --git a/charts/bitnami/spark/values.yaml b/charts/bitnami/spark/values.yaml index 8a78a913d..bb87b6229 100644 --- a/charts/bitnami/spark/values.yaml +++ b/charts/bitnami/spark/values.yaml @@ -95,7 +95,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/spark - tag: 3.5.0-debian-11-r10 + tag: 3.5.0-debian-11-r12 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -183,16 +183,26 @@ master: seLinuxOptions: {} ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param master.containerSecurityContext.enabled Enabled master containers' Security Context - ## @param master.containerSecurityContext.runAsUser Set master containers' Security Context runAsUser - ## @param master.containerSecurityContext.runAsNonRoot Set master containers' Security Context runAsNonRoot - ## @param master.containerSecurityContext.readOnlyRootFilesystem Set master containers' Security Context runAsNonRoot + ## @param master.containerSecurityContext.enabled Enabled containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param master.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param master.containerSecurityContext.privileged Set container's Security Context privileged + ## @param master.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param master.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param master.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param master.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param master.command Override default container command (useful when using custom images) ## command: [] @@ -451,28 +461,34 @@ worker: ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## @param worker.podSecurityContext.enabled Enable security context ## @param worker.podSecurityContext.fsGroup Group ID for the container - ## @param worker.podSecurityContext.runAsUser User ID for the container - ## @param worker.podSecurityContext.runAsGroup Group ID for the container ## @param worker.podSecurityContext.seLinuxOptions SELinux options for the container ## podSecurityContext: enabled: true fsGroup: 1001 - runAsUser: 1001 - runAsGroup: 0 seLinuxOptions: {} ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param worker.containerSecurityContext.enabled Enabled worker 
containers' Security Context - ## @param worker.containerSecurityContext.runAsUser Set worker containers' Security Context runAsUser - ## @param worker.containerSecurityContext.runAsNonRoot Set worker containers' Security Context runAsNonRoot - ## @param worker.containerSecurityContext.readOnlyRootFilesystem Set worker containers' Security Context runAsNonRoot + ## @param worker.containerSecurityContext.enabled Enabled containers' Security Context + ## @param worker.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param worker.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param worker.containerSecurityContext.privileged Set container's Security Context privileged + ## @param worker.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param worker.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param worker.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param worker.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param worker.command Override default container command (useful when using custom images) ## command: [] diff --git a/charts/bitnami/tomcat/Chart.lock b/charts/bitnami/tomcat/Chart.lock index 73f9c6268..4c3ccd233 100644 --- a/charts/bitnami/tomcat/Chart.lock +++ b/charts/bitnami/tomcat/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.13.2 -digest: sha256:551ae9c020597fd0a1d62967d9899a3c57a12e92f49e7a3967b6a187efdcaead -generated: "2023-10-09T21:56:34.987847613Z" + version: 2.13.3 +digest: sha256:9a971689db0c66ea95ac2e911c05014c2b96c6077c991131ff84f2982f88fb83 +generated: "2023-10-31T12:41:05.52315381+01:00" diff --git a/charts/bitnami/tomcat/Chart.yaml b/charts/bitnami/tomcat/Chart.yaml index a70b32f90..5e5a58e47 100644 --- a/charts/bitnami/tomcat/Chart.yaml +++ b/charts/bitnami/tomcat/Chart.yaml @@ -38,4 +38,4 @@ maintainers: name: tomcat sources: - https://github.com/bitnami/charts/tree/main/bitnami/tomcat -version: 10.10.10 +version: 10.11.0 diff --git a/charts/bitnami/tomcat/README.md b/charts/bitnami/tomcat/README.md index b4fc3db0e..1cbb9e013 100644 --- a/charts/bitnami/tomcat/README.md +++ b/charts/bitnami/tomcat/README.md @@ -11,9 +11,11 @@ Trademarks: This software listing is packaged by Bitnami. The respective tradema ## TL;DR ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/tomcat +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + ## Introduction This chart bootstraps a [Tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
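The Spark values.yaml hunks above (and the matching README rows) add a hardened `containerSecurityContext` for both the master and worker containers. The sketch below collects the new defaults in values-file form as a quick reference; the keys and values are taken directly from the diff, and only the `master` block is shown since the `worker` block is identical apart from its top-level key.

```yaml
# New container security context defaults introduced for the Spark chart
# (the same fields are added under `worker.containerSecurityContext`).
master:
  containerSecurityContext:
    enabled: true
    runAsUser: 1001
    runAsNonRoot: true
    privileged: false
    readOnlyRootFilesystem: false
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: "RuntimeDefault"
```

Operators who need to loosen any of these fields (for example where the `RuntimeDefault` seccomp profile is not wanted) can override them individually in their own values file or with `--set`; the chart ships them enabled by default.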
@@ -22,7 +24,7 @@ Tomcat implements several Java EE specifications including Java Servlet, JavaSer Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. -Looking to use Apache Tomcat in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use Apache Tomcat in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. ## Prerequisites @@ -36,9 +38,11 @@ Looking to use Apache Tomcat in production? Try [VMware Application Catalog](htt To install the chart with the release name `my-release`: ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/tomcat +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + These commands deploy Tomcat on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. > **Tip**: List all releases using `helm list` @@ -77,93 +81,97 @@ The command removes all the Kubernetes components associated with the chart and ### Tomcat parameters -| Name | Description | Value | -| ----------------------------- | ------------------------------------------------------------------------------------------------------ | ---------------------- | -| `image.registry` | Tomcat image registry | `docker.io` | -| `image.repository` | Tomcat image repository | `bitnami/tomcat` | -| `image.tag` | Tomcat image tag (immutable tags are recommended) | `10.1.15-debian-11-r0` | -| `image.digest` | Tomcat image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | Tomcat image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `image.debug` | Specify if debug logs should be enabled | `false` | -| `hostAliases` | Deployment pod host aliases | `[]` | -| `tomcatUsername` | Tomcat admin user | `user` | -| `tomcatPassword` | Tomcat admin password | `""` | -| `tomcatAllowRemoteManagement` | Enable remote access to management interface | `0` | -| `catalinaOpts` | Java runtime option used by tomcat JVM | `""` | -| `command` | Override default container command (useful when using custom images) | `[]` | -| `args` | Override default container args (useful when using custom images) | `[]` | -| `extraEnvVars` | Extra environment variables to be set on Tomcat container | `[]` | -| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | -| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables | `""` | +| Name | Description | Value | +| ----------------------------- | ------------------------------------------------------------------------------------------------------ | ------------------------ | +| `image.registry` | Tomcat image registry | `REGISTRY_NAME` | +| `image.repository` | Tomcat image repository | `REPOSITORY_NAME/tomcat` | +| `image.digest` | Tomcat image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Tomcat image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `tomcatUsername` | Tomcat admin user | `user` | +| `tomcatPassword` | Tomcat admin password | `""` | +| `tomcatAllowRemoteManagement` | Enable remote access to management interface | `0` | +| `catalinaOpts` | Java runtime option used by tomcat JVM | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `extraEnvVars` | Extra environment variables to be set on Tomcat container | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables | `""` | ### Tomcat deployment parameters -| Name | Description | Value | -| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------- | -| `replicaCount` | Specify number of Tomcat replicas | `1` | -| `deployment.type` | Use Deployment or StatefulSet | `deployment` | -| `updateStrategy.type` | StrategyType | `RollingUpdate` | -| `containerPorts.http` | HTTP port to expose at container level | `8080` | -| `containerExtraPorts` | Extra ports to expose at container level | `[]` | -| `podSecurityContext.enabled` | Enable Tomcat pods' Security Context | `true` | -| `podSecurityContext.fsGroup` | Set Tomcat pod's Security Context fsGroup | `1001` | -| `containerSecurityContext.enabled` | Enable Tomcat containers' SecurityContext | `true` | -| `containerSecurityContext.runAsUser` | User ID for the Tomcat container | `1001` | -| `containerSecurityContext.runAsNonRoot` | Force user to be root in Tomcat container | `true` | -| `resources.limits` | The resources limits for the Tomcat container | `{}` | -| `resources.requests` | The requested resources for the Tomcat container | `{}` | -| `livenessProbe.enabled` | Enable livenessProbe | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `readinessProbe.enabled` | Enable readinessProbe | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | -| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `startupProbe.enabled` | Enable startupProbe | `false` | -| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | -| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `3` | -| 
`startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | -| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `customLivenessProbe` | Override default liveness probe | `{}` | -| `customReadinessProbe` | Override default readiness probe | `{}` | -| `customStartupProbe` | Override default startup probe | `{}` | -| `podLabels` | Extra labels for Tomcat pods | `{}` | -| `podAnnotations` | Annotations for Tomcat pods | `{}` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | -| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` | -| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` | -| `schedulerName` | Alternative scheduler | `""` | -| `lifecycleHooks` | Override default etcd container hooks | `{}` | -| `podManagementPolicy` | podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode) | `""` | -| `tolerations` | Tolerations for pod assignment. Evaluated as a template. | `[]` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `extraPodSpec` | Optionally specify extra PodSpec | `{}` | -| `extraVolumes` | Optionally specify extra list of additional volumes for Tomcat pods in Deployment | `[]` | -| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volume claim templates for Tomcat pods in StatefulSet | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Tomcat container(s) | `[]` | -| `initContainers` | Add init containers to the Tomcat pods. | `[]` | -| `sidecars` | Add sidecars to the Tomcat pods. | `[]` | -| `persistence.enabled` | Enable persistence | `true` | -| `persistence.storageClass` | PVC Storage Class for Tomcat volume | `""` | -| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | -| `persistence.accessModes` | PVC Access Modes for Tomcat volume | `["ReadWriteOnce"]` | -| `persistence.size` | PVC Storage Request for Tomcat volume | `8Gi` | -| `persistence.existingClaim` | An Existing PVC name for Tomcat volume | `""` | -| `persistence.selectorLabels` | Selector labels to use in volume claim template in statefulset | `{}` | -| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
| `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------- | +| `replicaCount` | Specify number of Tomcat replicas | `1` | +| `deployment.type` | Use Deployment or StatefulSet | `deployment` | +| `updateStrategy.type` | StrategyType | `RollingUpdate` | +| `containerPorts.http` | HTTP port to expose at container level | `8080` | +| `containerExtraPorts` | Extra ports to expose at container level | `[]` | +| `podSecurityContext.enabled` | Enable Tomcat pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set Tomcat pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `resources.limits` | The resources limits for the Tomcat container | `{}` | +| `resources.requests` | The requested resources for the Tomcat container | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `3` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness 
probe | `{}` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `podLabels` | Extra labels for Tomcat pods | `{}` | +| `podAnnotations` | Annotations for Tomcat pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` | +| `schedulerName` | Alternative scheduler | `""` | +| `lifecycleHooks` | Override default etcd container hooks | `{}` | +| `podManagementPolicy` | podManagementPolicy to manage scaling operation of pods (only in StatefulSet mode) | `""` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template. | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `extraPodSpec` | Optionally specify extra PodSpec | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for Tomcat pods in Deployment | `[]` | +| `extraVolumeClaimTemplates` | Optionally specify extra list of additional volume claim templates for Tomcat pods in StatefulSet | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Tomcat container(s) | `[]` | +| `initContainers` | Add init containers to the Tomcat pods. | `[]` | +| `sidecars` | Add sidecars to the Tomcat pods. | `[]` | +| `persistence.enabled` | Enable persistence | `true` | +| `persistence.storageClass` | PVC Storage Class for Tomcat volume | `""` | +| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `persistence.accessModes` | PVC Access Modes for Tomcat volume | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for Tomcat volume | `8Gi` | +| `persistence.existingClaim` | An Existing PVC name for Tomcat volume | `""` | +| `persistence.selectorLabels` | Selector labels to use in volume claim template in statefulset | `{}` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
| `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | ### Traffic Exposure parameters @@ -198,51 +206,54 @@ The command removes all the Kubernetes components associated with the chart and ### Volume Permissions parameters -| Name | Description | Value | -| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r90` | -| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | -| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| Name | Description | Value | +| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | ### Metrics parameters -| Name | Description | Value | -| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | -| `metrics.jmx.catalinaOpts` | custom option used to enabled JMX on tomcat jvm evaluated as template | `-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true` | -| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | -| `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | -| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.19.0-debian-11-r95` | -| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | -| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | -| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | -| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | -| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | -| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | -| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | -| `metrics.jmx.ports.metrics` | JMX Exporter container metrics ports | `5556` | -| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | -| `metrics.podMonitor.podTargetLabels` | Used to keep given pod's labels in target | `[]` | -| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` | -| `metrics.podMonitor.namespace` | Optional namespace in which Prometheus is running | `""` | -| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | -| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` | -| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | -| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` | -| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape 
endpoints used by Prometheus | `{}` | -| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` | -| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | -| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | -| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` | -| `metrics.prometheusRule.rules` | Create specified [Rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) | `[]` | +| Name | Description | Value | +| --------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.catalinaOpts` | custom option used to enabled JMX on tomcat jvm evaluated as template | `-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `REGISTRY_NAME` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `REPOSITORY_NAME/jmx-exporter` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `metrics.jmx.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `metrics.jmx.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `metrics.jmx.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.jmx.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | +| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | +| `metrics.jmx.ports.metrics` | JMX Exporter container metrics ports | `5556` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.podMonitor.podTargetLabels` | Used to keep given pod's labels in target | `[]` | +| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using 
PrometheusOperator | `false` | +| `metrics.podMonitor.namespace` | Optional namespace in which Prometheus is running | `""` | +| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` | +| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | +| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` | +| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` | +| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` | +| `metrics.prometheusRule.rules` | Create specified [Rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) | `[]` | The above parameters map to the env variables defined in [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat). For more information please refer to the [bitnami/tomcat](https://github.com/bitnami/containers/tree/main/bitnami/tomcat) image documentation. @@ -250,9 +261,11 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm ```console helm install my-release \ - --set tomcatUsername=manager,tomcatPassword=password oci://registry-1.docker.io/bitnamicharts/tomcat + --set tomcatUsername=manager,tomcatPassword=password oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + The above command sets the Tomcat management username and password to `manager` and `password` respectively. > NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. @@ -260,9 +273,10 @@ The above command sets the Tomcat management username and password to `manager` Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```console -helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/tomcat +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. 
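
For reference, a minimal `values.yaml` passed with `-f` might look like the sketch below. It only uses parameters documented in this README (`tomcatUsername`, `tomcatPassword`, `metrics.jmx.enabled`); the values are illustrative, not recommendations.

```yaml
# Illustrative values.yaml for:
#   helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat
# Parameter names are taken from the Parameters tables above; values here are examples only.
tomcatUsername: manager
tomcatPassword: password
metrics:
  jmx:
    # Expose JMX metrics to Prometheus (the chart default is false).
    enabled: true
```
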
> **Tip**: You can use the default [values.yaml](values.yaml) ## Configuration and installation details @@ -346,9 +360,11 @@ Consequences: ```console export TOMCAT_PASSWORD=$(kubectl get secret --namespace default tomcat -o jsonpath="{.data.tomcat-password}" | base64 -d) kubectl delete deployments.apps tomcat -helm upgrade tomcat oci://registry-1.docker.io/bitnamicharts/tomcat --set tomcatPassword=$TOMCAT_PASSWORD +helm upgrade tomcat oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat --set tomcatPassword=$TOMCAT_PASSWORD ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + ### To 7.0.0 [On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. @@ -364,15 +380,19 @@ This release updates the Bitnami Tomcat container to `9.0.26-debian-9-r0`, which Tomcat container was moved to a non-root approach. There shouldn't be any issue when upgrading since the corresponding `securityContext` is enabled by default. Both the container image and the chart can be upgraded by running the command below: ```console -helm upgrade my-release oci://registry-1.docker.io/bitnamicharts/tomcat +helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + If you use a previous container image (previous to **8.5.35-r26**) disable the `securityContext` by running the command below: ```console -helm upgrade my-release oci://registry-1.docker.io/bitnamicharts/tomcat --set securityContext.enabled=false,image.tag=XXX +helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/tomcat --set securityContext.enabled=false,image.tag=XXX ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + ### To 1.0.0 Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. diff --git a/charts/bitnami/tomcat/charts/common/Chart.yaml b/charts/bitnami/tomcat/charts/common/Chart.yaml index 961b90f48..40cd22d77 100644 --- a/charts/bitnami/tomcat/charts/common/Chart.yaml +++ b/charts/bitnami/tomcat/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.13.2 +appVersion: 2.13.3 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.13.2 +version: 2.13.3 diff --git a/charts/bitnami/tomcat/charts/common/README.md b/charts/bitnami/tomcat/charts/common/README.md index fe6a01000..80da4cc2f 100644 --- a/charts/bitnami/tomcat/charts/common/README.md +++ b/charts/bitnami/tomcat/charts/common/README.md @@ -34,8 +34,8 @@ Looking to use our applications in production? Try [VMware Application Catalog]( ## Prerequisites -- Kubernetes 1.19+ -- Helm 3.2.0+ +- Kubernetes 1.23+ +- Helm 3.8.0+ ## Parameters diff --git a/charts/bitnami/tomcat/charts/common/templates/_capabilities.tpl b/charts/bitnami/tomcat/charts/common/templates/_capabilities.tpl index b1257397d..115674af8 100644 --- a/charts/bitnami/tomcat/charts/common/templates/_capabilities.tpl +++ b/charts/bitnami/tomcat/charts/common/templates/_capabilities.tpl @@ -184,7 +184,7 @@ Returns true if PodSecurityPolicy is supported {{/* Returns true if AdmissionConfiguration is supported */}} -{{- define "common.capabilities.admisionConfiguration.supported" -}} +{{- define "common.capabilities.admissionConfiguration.supported" -}} {{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}} {{- true -}} {{- end -}} @@ -193,7 +193,7 @@ Returns true if AdmissionConfiguration is supported {{/* Return the appropriate apiVersion for AdmissionConfiguration. */}} -{{- define "common.capabilities.admisionConfiguration.apiVersion" -}} +{{- define "common.capabilities.admissionConfiguration.apiVersion" -}} {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} {{- print "apiserver.config.k8s.io/v1alpha1" -}} {{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} diff --git a/charts/bitnami/tomcat/values.yaml b/charts/bitnami/tomcat/values.yaml index 729c54fae..70e61d55e 100644 --- a/charts/bitnami/tomcat/values.yaml +++ b/charts/bitnami/tomcat/values.yaml @@ -50,9 +50,9 @@ extraDeploy: [] ## Bitnami Tomcat image version ## ref: https://hub.docker.com/r/bitnami/tomcat/tags/ -## @param image.registry Tomcat image registry -## @param image.repository Tomcat image repository -## @param image.tag Tomcat image tag (immutable tags are recommended) +## @param image.registry [default: REGISTRY_NAME] Tomcat image registry +## @param image.repository [default: REPOSITORY_NAME/tomcat] Tomcat image repository +## @skip image.tag Tomcat image tag (immutable tags are recommended) ## @param image.digest Tomcat image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param image.pullPolicy Tomcat image pull policy ## @param image.pullSecrets Specify docker-registry secret names as an array @@ -159,14 +159,26 @@ podSecurityContext: fsGroup: 1001 ## Tomcat containers' SecurityContext ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## @param containerSecurityContext.enabled Enable Tomcat containers' SecurityContext -## @param containerSecurityContext.runAsUser User ID for the Tomcat container -## @param containerSecurityContext.runAsNonRoot Force user to be root in Tomcat container +## @param containerSecurityContext.enabled Enabled containers' Security Context +## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set container's Security Context privileged +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation +## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## Tomcat containers' resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## We usually recommend not to specify default resources and to leave this as a conscious @@ -575,9 +587,9 @@ volumePermissions: ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory ## enabled: false - ## @param volumePermissions.image.registry Init container volume-permissions image registry - ## @param volumePermissions.image.repository Init container volume-permissions image repository - ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array @@ -635,9 +647,9 @@ metrics: catalinaOpts: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true ## Bitnami JMX exporter image ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ - ## @param metrics.jmx.image.registry JMX exporter image registry - ## @param metrics.jmx.image.repository JMX exporter image repository - ## @param metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended) + ## @param metrics.jmx.image.registry [default: REGISTRY_NAME] JMX exporter image registry + ## @param metrics.jmx.image.repository [default: REPOSITORY_NAME/jmx-exporter] JMX exporter image repository + ## @skip metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended) ## @param metrics.jmx.image.digest JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array @@ -671,13 +683,25 @@ metrics: attrNameSnakeCase: true ## Prometheus JMX exporter containers' Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context - ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser - ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot + ## @param metrics.jmx.containerSecurityContext.enabled Enabled containers' Security Context + ## @param metrics.jmx.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param metrics.jmx.containerSecurityContext.privileged Set container's Security Context privileged + ## @param metrics.jmx.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param metrics.jmx.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param metrics.jmx.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.jmx.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## Prometheus JMX Exporter' resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## We usually recommend not to specify default resources and to leave this as a conscious diff --git a/charts/bitnami/wordpress/Chart.yaml b/charts/bitnami/wordpress/Chart.yaml index fcb1b77cd..afa2f6456 100644 --- a/charts/bitnami/wordpress/Chart.yaml +++ 
b/charts/bitnami/wordpress/Chart.yaml @@ -10,7 +10,7 @@ annotations: - name: os-shell image: docker.io/bitnami/os-shell:11-debian-11-r90 - name: wordpress - image: docker.io/bitnami/wordpress:6.3.2-debian-11-r5 + image: docker.io/bitnami/wordpress:6.3.2-debian-11-r8 licenses: Apache-2.0 apiVersion: v2 appVersion: 6.3.2 @@ -47,4 +47,4 @@ maintainers: name: wordpress sources: - https://github.com/bitnami/charts/tree/main/bitnami/wordpress -version: 18.0.12 +version: 18.1.3 diff --git a/charts/bitnami/wordpress/README.md b/charts/bitnami/wordpress/README.md index eef9beaf4..26220a360 100644 --- a/charts/bitnami/wordpress/README.md +++ b/charts/bitnami/wordpress/README.md @@ -172,13 +172,14 @@ The command removes all the Kubernetes components associated with the chart and | `extraContainerPorts` | Optionally specify extra list of additional ports for WordPress container(s) | `[]` | | `podSecurityContext.enabled` | Enabled WordPress pods' Security Context | `true` | | `podSecurityContext.fsGroup` | Set WordPress pod's Security Context fsGroup | `1001` | -| `podSecurityContext.seccompProfile.type` | Set WordPress container's Security Context seccomp profile | `RuntimeDefault` | -| `containerSecurityContext.enabled` | Enabled WordPress containers' Security Context | `true` | -| `containerSecurityContext.runAsUser` | Set WordPress container's Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsNonRoot` | Set WordPress container's Security Context runAsNonRoot | `true` | -| `containerSecurityContext.allowPrivilegeEscalation` | Set WordPress container's privilege escalation | `false` | -| `containerSecurityContext.readOnlyRootFilesystem` | Set WordPress container's Security Context readOnlyRootFilesystem | `false` | -| `containerSecurityContext.capabilities.drop` | Set WordPress container's Security Context runAsNonRoot | `["ALL"]` | +| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | | `livenessProbe.enabled` | Enable livenessProbe on WordPress containers | `true` | | `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | | `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | diff --git a/charts/bitnami/wordpress/values.yaml b/charts/bitnami/wordpress/values.yaml index 43cf568dd..a35b2a355 100644 --- a/charts/bitnami/wordpress/values.yaml +++ b/charts/bitnami/wordpress/values.yaml @@ -76,7 +76,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/wordpress - tag: 6.3.2-debian-11-r5 + tag: 6.3.2-debian-11-r8 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -402,30 +402,32 @@ extraContainerPorts: [] ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod ## @param podSecurityContext.enabled Enabled WordPress pods' Security Context ## @param podSecurityContext.fsGroup Set WordPress pod's Security Context fsGroup -## @param podSecurityContext.seccompProfile.type Set WordPress container's Security Context seccomp profile ## podSecurityContext: enabled: true fsGroup: 1001 - seccompProfile: - type: "RuntimeDefault" ## Configure Container Security Context (only main container) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## @param containerSecurityContext.enabled Enabled WordPress containers' Security Context -## @param containerSecurityContext.runAsUser Set WordPress container's Security Context runAsUser -## @param containerSecurityContext.runAsNonRoot Set WordPress container's Security Context runAsNonRoot -## @param containerSecurityContext.allowPrivilegeEscalation Set WordPress container's privilege escalation -## @param containerSecurityContext.readOnlyRootFilesystem Set WordPress container's Security Context readOnlyRootFilesystem -## @param containerSecurityContext.capabilities.drop Set WordPress container's Security Context runAsNonRoot +## @param containerSecurityContext.enabled Enabled containers' Security Context +## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set container's Security Context privileged +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation +## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true - allowPrivilegeEscalation: false + privileged: false readOnlyRootFilesystem: false + allowPrivilegeEscalation: false capabilities: drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## Configure extra options for WordPress containers' liveness, readiness and startup probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes ## @param livenessProbe.enabled Enable livenessProbe on WordPress containers diff --git a/charts/bitnami/zookeeper/Chart.yaml b/charts/bitnami/zookeeper/Chart.yaml index fa8e1ec44..c98d53009 100644 --- a/charts/bitnami/zookeeper/Chart.yaml +++ b/charts/bitnami/zookeeper/Chart.yaml @@ -30,4 +30,4 @@ maintainers: name: zookeeper sources: - https://github.com/bitnami/charts/tree/main/bitnami/zookeeper -version: 12.1.6 +version: 12.3.0 diff --git a/charts/bitnami/zookeeper/README.md b/charts/bitnami/zookeeper/README.md index 2c6685664..c49199542 100644 --- a/charts/bitnami/zookeeper/README.md +++ b/charts/bitnami/zookeeper/README.md @@ -11,16 +11,18 @@ Trademarks: This software listing is packaged by Bitnami. 
The respective tradema ## TL;DR ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/zookeeper +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/zookeeper ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + ## Introduction This chart bootstraps a [ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. -Looking to use Apache ZooKeeper in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use Apache ZooKeeper in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. ## Prerequisites @@ -33,9 +35,11 @@ Looking to use Apache ZooKeeper in production? Try [VMware Application Catalog]( To install the chart with the release name `my-release`: ```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/zookeeper +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/zookeeper ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. > **Tip**: List all releases using `helm list` @@ -78,117 +82,121 @@ The command removes all the Kubernetes components associated with the chart and ### ZooKeeper chart parameters -| Name | Description | Value | -| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- | -| `image.registry` | ZooKeeper image registry | `docker.io` | -| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` | -| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.9.1-debian-11-r1` | -| `image.digest` | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `image.debug` | Specify if debug values should be set | `false` | -| `auth.client.enabled` | Enable ZooKeeper client-server authentication. 
It uses SASL/Digest-MD5 | `false` | -| `auth.client.clientUser` | User that will use ZooKeeper clients to auth | `""` | -| `auth.client.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | -| `auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created | `""` | -| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | -| `auth.client.existingSecret` | Use existing secret (ignores previous passwords) | `""` | -| `auth.quorum.enabled` | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 | `false` | -| `auth.quorum.learnerUser` | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | -| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | -| `auth.quorum.serverUsers` | Comma, semicolon or whitespace separated list of users for the quorumServers. | `""` | -| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | -| `auth.quorum.existingSecret` | Use existing secret (ignores previous passwords) | `""` | -| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` | -| `initLimit` | ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader | `10` | -| `syncLimit` | How far out of date a server can be from a leader | `5` | -| `preAllocSize` | Block size for transaction log file | `65536` | -| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` | -| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` | -| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` | -| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` | -| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands that can be executed | `srvr, mntr, ruok` | -| `minServerId` | Minimal SERVER_ID value, nodes increment their IDs respectively | `1` | -| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` | -| `autopurge.snapRetainCount` | The most recent snapshots amount (and corresponding transaction logs) to retain | `3` | -| `autopurge.purgeInterval` | The time interval (in hours) for which the purge task has to be triggered | `0` | -| `logLevel` | Log level for the ZooKeeper server. 
ERROR by default | `ERROR` | -| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | -| `dataLogDir` | Dedicated data log directory | `""` | -| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | -| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | -| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | -| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | -| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | -| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | -| `args` | Override default container args (useful when using custom images) | `[]` | +| Name | Description | Value | +| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | --------------------------- | +| `image.registry` | ZooKeeper image registry | `REGISTRY_NAME` | +| `image.repository` | ZooKeeper image repository | `REPOSITORY_NAME/zookeeper` | +| `image.digest` | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.client.enabled` | Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5 | `false` | +| `auth.client.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `auth.client.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created | `""` | +| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.client.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `auth.quorum.enabled` | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 | `false` | +| `auth.quorum.learnerUser` | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.serverUsers` | Comma, semicolon or whitespace separated list of users for the quorumServers. 
| `""` | +| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.quorum.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `preAllocSize` | Block size for transaction log file | `65536` | +| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` | +| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` | +| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` | +| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` | +| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands that can be executed | `srvr, mntr, ruok` | +| `minServerId` | Minimal SERVER_ID value, nodes increment their IDs respectively | `1` | +| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` | +| `autopurge.snapRetainCount` | The most recent snapshots amount (and corresponding transaction logs) to retain | `3` | +| `autopurge.purgeInterval` | The time interval (in hours) for which the purge task has to be triggered | `0` | +| `logLevel` | Log level for the ZooKeeper server. ERROR by default | `ERROR` | +| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | +| `dataLogDir` | Dedicated data log directory | `""` | +| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | +| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | ### Statefulset parameters -| Name | Description | Value | -| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | -| `replicaCount` | Number of ZooKeeper nodes | `1` | -| `containerPorts.client` | ZooKeeper client container port | `2181` | -| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | -| `containerPorts.follower` | ZooKeeper follower container port | `2888` | -| `containerPorts.election` | ZooKeeper election container port | `3888` | -| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `livenessProbe.timeoutSeconds` | 
Timeout seconds for livenessProbe | `5` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | -| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | -| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | -| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup | `{}` | -| `resources.limits` | The resources limits for the ZooKeeper containers | `{}` | -| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` | -| `resources.requests.cpu` | The requested cpu for the ZooKeeper containers | `250m` | -| `podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` | -| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` | -| `containerSecurityContext.enabled` | Enabled ZooKeeper containers' Security Context | `true` | -| `containerSecurityContext.runAsUser` | Set ZooKeeper containers' Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsNonRoot` | Set ZooKeeper containers' Security Context runAsNonRoot | `true` | -| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as nonprivilege | `false` | -| `hostAliases` | ZooKeeper pods host aliases | `[]` | -| `podLabels` | Extra labels for ZooKeeper pods | `{}` | -| `podAnnotations` | Annotations for ZooKeeper pods | `{}` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | -| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. 
| `[]` | -| `affinity` | Affinity for pod assignment | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Tolerations for pod assignment | `[]` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | -| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | -| `schedulerName` | Kubernetes pod scheduler registry | `""` | -| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | -| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | -| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | -| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | -| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | -| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | -| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | -| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `containerPorts.client` | ZooKeeper client container port | `2181` | +| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | +| `containerPorts.follower` | ZooKeeper follower container port | `2888` | +| `containerPorts.election` | ZooKeeper election container port | `3888` | +| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | +| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | +| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial 
delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the ZooKeeper containers | `{}` | +| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` | +| `resources.requests.cpu` | The requested cpu for the ZooKeeper containers | `250m` | +| `podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `hostAliases` | ZooKeeper pods host aliases | `[]` | +| `podLabels` | Extra labels for ZooKeeper pods | `{}` | +| `podAnnotations` | Annotations for ZooKeeper pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | +| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | +| `enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | ### Traffic Exposure parameters @@ -243,19 +251,18 @@ The command removes all the Kubernetes components associated with the chart and ### Volume Permissions parameters -| Name | Description | Value | -| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r90` | -| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | -| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | -| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | -| `volumePermissions.containerSecurityContext.enabled` | Enabled init container Security Context | `true` | -| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.enabled` | Enabled init container Security Context | `true` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | ### Metrics parameters @@ -319,9 +326,11 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm ```console helm install my-release \ --set auth.clientUser=newUser \ - oci://registry-1.docker.io/bitnamicharts/zookeeper + oci://REGISTRY_NAME/REPOSITORY_NAME/zookeeper ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + The above command sets the ZooKeeper user to `newUser`. > NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. @@ -329,9 +338,10 @@ The above command sets the ZooKeeper user to `newUser`. Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, ```console -helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/zookeeper +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/zookeeper ``` +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. > **Tip**: You can use the default [values.yaml](values.yaml) ## Configuration and installation details @@ -525,4 +535,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file +limitations under the License. diff --git a/charts/bitnami/zookeeper/templates/statefulset.yaml b/charts/bitnami/zookeeper/templates/statefulset.yaml index 9c9b5dfcf..cd8db49d6 100644 --- a/charts/bitnami/zookeeper/templates/statefulset.yaml +++ b/charts/bitnami/zookeeper/templates/statefulset.yaml @@ -43,6 +43,7 @@ spec: labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} app.kubernetes.io/component: zookeeper spec: + enableServiceLinks: {{ .Values.enableServiceLinks }} serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} {{- if .Values.hostAliases }} diff --git a/charts/bitnami/zookeeper/values.yaml b/charts/bitnami/zookeeper/values.yaml index 06bcb14e8..825d00e5d 100644 --- a/charts/bitnami/zookeeper/values.yaml +++ b/charts/bitnami/zookeeper/values.yaml @@ -68,9 +68,9 @@ diagnosticMode: ## Bitnami ZooKeeper image version ## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ -## @param image.registry ZooKeeper image registry -## @param image.repository ZooKeeper image repository -## @param image.tag ZooKeeper image tag (immutable tags are recommended) +## @param image.registry [default: REGISTRY_NAME] ZooKeeper image registry +## @param image.repository [default: REPOSITORY_NAME/zookeeper] ZooKeeper image repository +## @skip image.tag ZooKeeper image tag (immutable tags are recommended) ## @param image.digest ZooKeeper image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param image.pullPolicy ZooKeeper image pull policy ## @param image.pullSecrets Specify docker-registry secret names as an array @@ -332,16 +332,26 @@ podSecurityContext: fsGroup: 1001 ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## @param containerSecurityContext.enabled Enabled ZooKeeper containers' Security Context -## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser -## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot -## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as nonprivilege +## @param containerSecurityContext.enabled Enabled containers' Security Context +## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set container's Security Context privileged +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation +## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## @param hostAliases ZooKeeper pods host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## @@ -475,6 +485,11 @@ pdb: create: false minAvailable: "" maxUnavailable: 1 +## @param enableServiceLinks Whether information about services should be injected into pod's environment variable +## The environment variables injected by service links are not used, but can lead to slow boot times or slow running of the scripts when there are many services in the current namespace. +## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. +## +enableServiceLinks: true ## @section Traffic Exposure parameters @@ -653,9 +668,9 @@ volumePermissions: ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume ## enabled: false - ## @param volumePermissions.image.registry Init container volume-permissions image registry - ## @param volumePermissions.image.repository Init container volume-permissions image repository - ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets diff --git a/charts/crowdstrike/falcon-sensor/Chart.yaml b/charts/crowdstrike/falcon-sensor/Chart.yaml index 21a4bb936..7d31e57eb 100644 --- a/charts/crowdstrike/falcon-sensor/Chart.yaml +++ b/charts/crowdstrike/falcon-sensor/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>1.22.0-0' catalog.cattle.io/release-name: falcon-sensor apiVersion: v2 -appVersion: 1.22.1 +appVersion: 1.23.1 description: A Helm chart to deploy CrowdStrike Falcon sensors into Kubernetes clusters. home: https://crowdstrike.com icon: https://raw.githubusercontent.com/CrowdStrike/falcon-helm/main/images/crowdstrike-logo.svg @@ -24,4 +24,4 @@ name: falcon-sensor sources: - https://github.com/CrowdStrike/falcon-helm type: application -version: 1.22.1 +version: 1.23.1 diff --git a/charts/crowdstrike/falcon-sensor/templates/_helpers.tpl b/charts/crowdstrike/falcon-sensor/templates/_helpers.tpl index 7d3982059..3f3c1efe4 100644 --- a/charts/crowdstrike/falcon-sensor/templates/_helpers.tpl +++ b/charts/crowdstrike/falcon-sensor/templates/_helpers.tpl @@ -94,26 +94,30 @@ Create the name of the service account to use {{- define "falcon-sensor.daemonsetResources" -}} {{- if .Values.node.gke.autopilot -}} -{{- if .Values.node.daemonset.resources -}} resources: - {{- if .Values.node.daemonset.resources.limits -}} + {{- if (.Values.node.daemonset.resources | default dict ).limits }} limits: - cpu: {{ .Values.node.daemonset.resources.limits.cpu | default "750m" }} - memory: {{ .Values.node.daemonset.resources.limits.memory | default "1.5Gi" }} + cpu: {{ (.Values.node.daemonset.resources.limits | default dict ).cpu | default "750m" }} + memory: {{ (.Values.node.daemonset.resources.limits | default dict ).memory | default "1.5Gi" }} + ephemeral-storage: {{ (index (.Values.node.daemonset.resources.limits | default dict ) "ephemeral-storage") | default "100Mi" }} + {{- else }} + limits: + cpu: 750m + memory: 1.5Gi + ephemeral-storage: 100Mi {{- end }} + {{- if (.Values.node.daemonset.resources | default dict ).requests }} requests: - cpu: {{ .Values.node.daemonset.resources.requests.cpu | default "750m" }} - memory: {{ .Values.node.daemonset.resources.requests.memory | default "1.5Gi" }} -{{- else -}} -resources: - limits: - cpu: "750m" - memory: "1.5Gi" + cpu: {{ (.Values.node.daemonset.resources.requests | default dict ).cpu | default "750m" }} + ephemeral-storage: {{ (index (.Values.node.daemonset.resources.requests | default dict ) "ephemeral-storage") | default "100Mi" }} + memory: {{ (.Values.node.daemonset.resources.requests | default dict ).memory | default "1.5Gi" }} + {{- else }} requests: - cpu: "750m" - memory: "1.5Gi" -{{- end -}} -{{- else -}} + cpu: 750m + memory: 1.5Gi + ephemeral-storage: 100Mi + {{- end }} + {{- else -}} {{- if .Values.node.daemonset.resources -}} {{- toYaml .Values.node.daemonset.resources -}} {{- end -}} diff --git a/charts/crowdstrike/falcon-sensor/templates/daemonset.yaml b/charts/crowdstrike/falcon-sensor/templates/daemonset.yaml index ac8495a2d..a39291045 100644 --- a/charts/crowdstrike/falcon-sensor/templates/daemonset.yaml +++ b/charts/crowdstrike/falcon-sensor/templates/daemonset.yaml @@ -108,15 +108,17 @@ spec: - name: falconstore-dir mountPath: /host_opt {{- end }} + {{- if or .Values.node.gke.autopilot 
.Values.node.daemonset.resources }} resources: requests: cpu: 10m - ephemeral-storage: 10Mi + ephemeral-storage: 100Mi memory: 50Mi limits: cpu: 10m - ephemeral-storage: 10Mi + ephemeral-storage: 100Mi memory: 50Mi + {{- end }} securityContext: runAsUser: 0 privileged: true diff --git a/charts/crowdstrike/falcon-sensor/templates/node_cleanup.yaml b/charts/crowdstrike/falcon-sensor/templates/node_cleanup.yaml index 2a7f0165e..81e01aef8 100644 --- a/charts/crowdstrike/falcon-sensor/templates/node_cleanup.yaml +++ b/charts/crowdstrike/falcon-sensor/templates/node_cleanup.yaml @@ -100,15 +100,17 @@ spec: - name: opt-crowdstrike mountPath: /host_opt {{- end }} + {{- if or .Values.node.gke.autopilot .Values.node.daemonset.resources }} resources: requests: cpu: 10m - ephemeral-storage: 10Mi + ephemeral-storage: 100Mi memory: 50Mi limits: cpu: 10m - ephemeral-storage: 10Mi + ephemeral-storage: 100Mi memory: 50Mi + {{- end }} securityContext: runAsUser: 0 privileged: true @@ -133,15 +135,17 @@ spec: - sleep 10 command: - /bin/bash + {{- if or .Values.node.gke.autopilot .Values.node.daemonset.resources }} resources: requests: cpu: 10m - ephemeral-storage: 10Mi + ephemeral-storage: 100Mi memory: 50Mi limits: cpu: 10m - ephemeral-storage: 10Mi + ephemeral-storage: 100Mi memory: 50Mi + {{- end }} securityContext: privileged: false readOnlyRootFilesystem: true diff --git a/charts/crowdstrike/falcon-sensor/values.yaml b/charts/crowdstrike/falcon-sensor/values.yaml index 888395ec7..4b586b9d1 100644 --- a/charts/crowdstrike/falcon-sensor/values.yaml +++ b/charts/crowdstrike/falcon-sensor/values.yaml @@ -62,9 +62,11 @@ node: # resources: # limits: # cpu: 250m + # ephemeral-storage: 100Mi # memory: 500Mi # requests: # cpu: 250m + # ephemeral-storage: 100Mi # memory: 500Mi # Update strategy to role out new daemonset configuration to the nodes. diff --git a/charts/datadog/datadog/CHANGELOG.md b/charts/datadog/datadog/CHANGELOG.md index ed50cff1b..1a6b7d4ee 100644 --- a/charts/datadog/datadog/CHANGELOG.md +++ b/charts/datadog/datadog/CHANGELOG.md @@ -1,5 +1,13 @@ # Datadog changelog +## 3.43.1 + +* Fix docstring typos and remove unneeded lines. + +## 3.43.0 + +* Default `Agent` and `Cluster-Agent` to `7.49.0` version. + ## 3.42.1 * Bump FIPS proxy OpenSSL version to 3.0.12 diff --git a/charts/datadog/datadog/Chart.yaml b/charts/datadog/datadog/Chart.yaml index e5c59b28e..d7b863840 100644 --- a/charts/datadog/datadog/Chart.yaml +++ b/charts/datadog/datadog/Chart.yaml @@ -19,4 +19,4 @@ name: datadog sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-agent -version: 3.42.1 +version: 3.43.1 diff --git a/charts/datadog/datadog/README.md b/charts/datadog/datadog/README.md index 4151dd9b7..a3f23c044 100644 --- a/charts/datadog/datadog/README.md +++ b/charts/datadog/datadog/README.md @@ -1,6 +1,6 @@ # Datadog -![Version: 3.42.1](https://img.shields.io/badge/Version-3.42.1-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) +![Version: 3.43.1](https://img.shields.io/badge/Version-3.43.1-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) [Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. 
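The 3.43.0 changelog entry above moves the default `Agent` and `Cluster-Agent` images to `7.49.0`; the corresponding tag bumps show up further down in the README table and `values.yaml`. As a minimal sketch only — assuming the chart's documented `agents.image.tag`, `clusterAgent.image.tag`, and `clusterChecksRunner.image.tag` keys and a hypothetical override file name — pinning all three components to one explicit tag might look like:

```yaml
# Hypothetical values-override.yaml for the datadog chart (sketch, not the chart's defaults).
# Pins the node Agent, Cluster Agent, and cluster checks runner to a single tag
# instead of relying on the chart default (7.49.0 as of chart 3.43.x).
agents:
  image:
    tag: "7.49.0"
clusterAgent:
  image:
    tag: "7.49.0"
clusterChecksRunner:
  image:
    tag: "7.49.0"
```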
It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). @@ -450,7 +450,7 @@ helm install \ | agents.image.pullPolicy | string | `"IfNotPresent"` | Datadog Agent image pull policy | | agents.image.pullSecrets | list | `[]` | Datadog Agent repository pullSecret (ex: specify docker registry credentials) | | agents.image.repository | string | `nil` | Override default registry + image.name for Agent | -| agents.image.tag | string | `"7.48.1"` | Define the Agent version to use | +| agents.image.tag | string | `"7.49.0"` | Define the Agent version to use | | agents.image.tagSuffix | string | `""` | Suffix to append to Agent tag | | agents.localService.forceLocalServiceEnabled | bool | `false` | Force the creation of the internal traffic policy service to target the agent running on the local node. By default, the internal traffic service is created only on Kubernetes 1.22+ where the feature became beta and enabled by default. This option allows to force the creation of the internal traffic service on kubernetes 1.21 where the feature was alpha and required a feature gate to be explicitly enabled. | | agents.localService.overrideName | string | `""` | Name of the internal traffic service to target the agent running on the local node | @@ -514,7 +514,7 @@ helm install \ | clusterAgent.image.pullPolicy | string | `"IfNotPresent"` | Cluster Agent image pullPolicy | | clusterAgent.image.pullSecrets | list | `[]` | Cluster Agent repository pullSecret (ex: specify docker registry credentials) | | clusterAgent.image.repository | string | `nil` | Override default registry + image.name for Cluster Agent | -| clusterAgent.image.tag | string | `"7.48.1"` | Cluster Agent image tag to use | +| clusterAgent.image.tag | string | `"7.49.0"` | Cluster Agent image tag to use | | clusterAgent.livenessProbe | object | Every 15s / 6 KO / 1 OK | Override default Cluster Agent liveness probe settings | | clusterAgent.metricsProvider.aggregator | string | `"avg"` | Define the aggregator the cluster agent will use to process the metrics. The options are (avg, min, max, sum) | | clusterAgent.metricsProvider.createReaderRbac | bool | `true` | Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) | @@ -565,7 +565,7 @@ helm install \ | clusterChecksRunner.image.pullPolicy | string | `"IfNotPresent"` | Datadog Agent image pull policy | | clusterChecksRunner.image.pullSecrets | list | `[]` | Datadog Agent repository pullSecret (ex: specify docker registry credentials) | | clusterChecksRunner.image.repository | string | `nil` | Override default registry + image.name for Cluster Check Runners | -| clusterChecksRunner.image.tag | string | `"7.48.1"` | Define the Agent version to use | +| clusterChecksRunner.image.tag | string | `"7.49.0"` | Define the Agent version to use | | clusterChecksRunner.image.tagSuffix | string | `""` | Suffix to append to Agent tag | | clusterChecksRunner.livenessProbe | object | Every 15s / 6 KO / 1 OK | Override default agent liveness probe settings | | clusterChecksRunner.networkPolicy.create | bool | `false` | If true, create a NetworkPolicy for the cluster checks runners. DEPRECATED. 
Use datadog.networkPolicy.create instead | @@ -609,13 +609,13 @@ helm install \ | datadog.clusterTagger.collectKubernetesTags | bool | `false` | Enables Kubernetes resources tags collection. | | datadog.collectEvents | bool | `true` | Enables this to start event collection from the kubernetes API | | datadog.confd | object | `{}` | Provide additional check configurations (static and Autodiscovery) | -| datadog.containerExclude | string | `nil` | Exclude containers from the Agent Autodiscovery, as a space-sepatered list | -| datadog.containerExcludeLogs | string | `nil` | Exclude logs from the Agent Autodiscovery, as a space-separated list | -| datadog.containerExcludeMetrics | string | `nil` | Exclude metrics from the Agent Autodiscovery, as a space-separated list | +| datadog.containerExclude | string | `nil` | Exclude containers from Agent Autodiscovery, as a space-separated list | +| datadog.containerExcludeLogs | string | `nil` | Exclude logs from Agent Autodiscovery, as a space-separated list | +| datadog.containerExcludeMetrics | string | `nil` | Exclude metrics from Agent Autodiscovery, as a space-separated list | | datadog.containerImageCollection.enabled | bool | `false` | Enable collection of container image metadata | -| datadog.containerInclude | string | `nil` | Include containers in the Agent Autodiscovery, as a space-separated list. If a container matches an include rule, it’s always included in the Autodiscovery | -| datadog.containerIncludeLogs | string | `nil` | Include logs in the Agent Autodiscovery, as a space-separated list | -| datadog.containerIncludeMetrics | string | `nil` | Include metrics in the Agent Autodiscovery, as a space-separated list | +| datadog.containerInclude | string | `nil` | Include containers in Agent Autodiscovery, as a space-separated list. If a container matches an include rule, it’s always included in Autodiscovery | +| datadog.containerIncludeLogs | string | `nil` | Include logs in Agent Autodiscovery, as a space-separated list | +| datadog.containerIncludeMetrics | string | `nil` | Include metrics in Agent Autodiscovery, as a space-separated list | | datadog.containerLifecycle.enabled | bool | `true` | Enable container lifecycle events collection | | datadog.containerRuntimeSupport.enabled | bool | `true` | Set this to false to disable agent access to container runtime. | | datadog.criSocketPath | string | `nil` | Path to the container runtime socket (if different from Docker) | @@ -634,8 +634,8 @@ helm install \ | datadog.env | list | `[]` | Set environment variables for all Agents | | datadog.envDict | object | `{}` | Set environment variables for all Agents defined in a dict | | datadog.envFrom | list | `[]` | Set environment variables for all Agents directly from configMaps and/or secrets | -| datadog.excludePauseContainer | bool | `true` | Exclude pause containers from the Agent Autodiscovery. | -| datadog.expvarPort | int | `6000` | Specify the port to expose pprof and expvar to not interfer with the agentmetrics port from the cluster-agent, which defaults to 5000 | +| datadog.excludePauseContainer | bool | `true` | Exclude pause containers from Agent Autodiscovery. 
| +| datadog.expvarPort | int | `6000` | Specify the port to expose pprof and expvar to not interfere with the agent metrics port from the cluster-agent, which defaults to 5000 | | datadog.helmCheck.collectEvents | bool | `false` | Set this to true to enable event collection in the Helm Check (Requires Agent 7.36.0+ and Cluster Agent 1.20.0+) This requires datadog.HelmCheck.enabled to be set to true | | datadog.helmCheck.enabled | bool | `false` | Set this to true to enable the Helm check (Requires Agent 7.35.0+ and Cluster Agent 1.19.0+) This requires clusterAgent.enabled to be set to true | | datadog.helmCheck.valuesAsTags | object | `{}` | Collects Helm values from a release and uses them as tags (Requires Agent and Cluster Agent 7.40.0+). This requires datadog.HelmCheck.enabled to be set to true | @@ -662,7 +662,7 @@ helm install \ | datadog.kubernetesEvents.collectedEventTypes | list | `[{"kind":"Pod","reasons":["Failed","BackOff","Unhealthy","FailedScheduling","FailedMount","FailedAttachVolume"]},{"kind":"Node","reasons":["TerminatingEvictedPod","NodeNotReady","Rebooted","HostPortConflict"]},{"kind":"CronJob","reasons":["SawCompletedJob"]}]` | Event types to be collected. This requires datadog.kubernetesEvents.unbundleEvents to be set to true. | | datadog.kubernetesEvents.unbundleEvents | bool | `false` | Allow unbundling kubernetes events, 1:1 mapping between Kubernetes and Datadog events. (Requires Cluster Agent 7.42.0+). | | datadog.leaderElection | bool | `true` | Enables leader election mechanism for event collection | -| datadog.leaderElectionResource | string | `"configmap"` | Selects the default resource to use for leader election. Can be: * "lease" / "leases". Only supported in agent 7.47+ * "configmap" / "confimaps". "" to automatically detect which one to use. | +| datadog.leaderElectionResource | string | `"configmap"` | Selects the default resource to use for leader election. Can be: * "lease" / "leases". Only supported in agent 7.47+ * "configmap" / "configmaps". "" to automatically detect which one to use. | | datadog.leaderLeaseDuration | string | `nil` | Set the lease time for leader election in second | | datadog.logLevel | string | `"INFO"` | Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, off | | datadog.logs.autoMultiLineDetection | bool | `false` | Allows the Agent to detect common multi-line patterns automatically. | @@ -747,17 +747,17 @@ helm install \ | existingClusterAgent.serviceName | string | `nil` | Existing service name to use for reaching the external Cluster Agent | | existingClusterAgent.tokenSecretName | string | `nil` | Existing secret name to use for external Cluster Agent token | | fips.customFipsConfig | object | `{}` | Configure a custom configMap to provide the FIPS configuration. Specify custom contents for the FIPS proxy sidecar container config (/etc/datadog-fips-proxy/datadog-fips-proxy.cfg). If empty, the default FIPS proxy sidecar container config is used. | -| fips.enabled | bool | `false` | | +| fips.enabled | bool | `false` | Enable fips sidecar | | fips.image.digest | string | `""` | Define the FIPS sidecar image digest to use, takes precedence over `fips.image.tag` if specified. | | fips.image.name | string | `"fips-proxy"` | | | fips.image.pullPolicy | string | `"IfNotPresent"` | Datadog the FIPS sidecar image pull policy | | fips.image.repository | string | `nil` | Override default registry + image.name for the FIPS sidecar container. 
| | fips.image.tag | string | `"0.6.1"` | Define the FIPS sidecar container version to use. | -| fips.local_address | string | `"127.0.0.1"` | | -| fips.port | int | `9803` | | -| fips.portRange | int | `15` | | +| fips.local_address | string | `"127.0.0.1"` | Set local IP address | +| fips.port | int | `9803` | Specifies which port is used by the containers to communicate to the FIPS sidecar. | +| fips.portRange | int | `15` | Specifies the number of ports used, defaults to 13 https://github.com/DataDog/datadog-agent/blob/7.44.x/pkg/config/config.go#L1564-L1577 | | fips.resources | object | `{}` | Resource requests and limits for the FIPS sidecar container. | -| fips.use_https | bool | `false` | | +| fips.use_https | bool | `false` | Option to enable https | | fullnameOverride | string | `nil` | Override the full qualified app name | | kube-state-metrics.image.repository | string | `"registry.k8s.io/kube-state-metrics/kube-state-metrics"` | Default kube-state-metrics image repository. | | kube-state-metrics.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node selector for KSM. KSM only supports Linux. | @@ -766,12 +766,12 @@ helm install \ | kube-state-metrics.serviceAccount.create | bool | `true` | If true, create ServiceAccount, require rbac kube-state-metrics.rbac.create true | | kube-state-metrics.serviceAccount.name | string | `nil` | The name of the ServiceAccount to use. | | nameOverride | string | `nil` | Override name of app | -| providers.aks.enabled | bool | `false` | Activate all specifities related to AKS configuration. Required as currently we cannot auto-detect AKS. | +| providers.aks.enabled | bool | `false` | Activate all specificities related to AKS configuration. Required as currently we cannot auto-detect AKS. | | providers.eks.ec2.useHostnameFromFile | bool | `false` | Use hostname from EC2 filesystem instead of fetching from metadata endpoint. | | providers.gke.autopilot | bool | `false` | Enables Datadog Agent deployment on GKE Autopilot | | providers.gke.cos | bool | `false` | Enables Datadog Agent deployment on GKE with Container-Optimized OS (COS) | | registry | string | `"gcr.io/datadoghq"` | Registry to use for all Agent images (default gcr.io) | -| remoteConfiguration.enabled | bool | `true` | Set to true to enable remote configuration on the Cluster Agent (if set) and the node agent. Can be overriden if `datadog.remoteConfiguration.enabled` or `clusterAgent.admissionController.remoteInstrumentation.enabled` is set to `false`. Preferred way to enable Remote Configuration. | +| remoteConfiguration.enabled | bool | `true` | Set to true to enable remote configuration on the Cluster Agent (if set) and the node agent. Can be overridden if `datadog.remoteConfiguration.enabled` or `clusterAgent.admissionController.remoteInstrumentation.enabled` is set to `false`. Preferred way to enable Remote Configuration. 
| | targetSystem | string | `"linux"` | Target OS for this deployment (possible values: linux, windows) | ## Configuration options for Windows deployments diff --git a/charts/datadog/datadog/values.yaml b/charts/datadog/datadog/values.yaml index af819cfa6..c39509c0b 100644 --- a/charts/datadog/datadog/values.yaml +++ b/charts/datadog/datadog/values.yaml @@ -279,7 +279,7 @@ datadog: # @default -- /var/log/pods on Linux, C:\var\log\pods on Windows podLogsPath: - # datadog.expvarPort -- Specify the port to expose pprof and expvar to not interfer with the agentmetrics port from the cluster-agent, which defaults to 5000 + # datadog.expvarPort -- Specify the port to expose pprof and expvar to not interfere with the agent metrics port from the cluster-agent, which defaults to 5000 expvarPort: 6000 ## dogstatsd configuration @@ -394,7 +394,7 @@ datadog: # datadog.leaderElectionResource -- Selects the default resource to use for leader election. # Can be: # * "lease" / "leases". Only supported in agent 7.47+ - # * "configmap" / "confimaps". + # * "configmap" / "configmaps". # "" to automatically detect which one to use. leaderElectionResource: configmap @@ -805,36 +805,30 @@ datadog: # - redisdb # - kubernetes_state - # datadog.containerExclude -- Exclude containers from the Agent - # Autodiscovery, as a space-sepatered list + # datadog.containerExclude -- Exclude containers from Agent Autodiscovery, as a space-separated list ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers containerExclude: # "image:datadog/agent" - # datadog.containerInclude -- Include containers in the Agent Autodiscovery, - # as a space-separated list. If a container matches an include rule, it’s - # always included in the Autodiscovery + # datadog.containerInclude -- Include containers in Agent Autodiscovery, as a space-separated list. + # If a container matches an include rule, it’s always included in Autodiscovery ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers containerInclude: - # datadog.containerExcludeLogs -- Exclude logs from the Agent Autodiscovery, - # as a space-separated list + # datadog.containerExcludeLogs -- Exclude logs from Agent Autodiscovery, as a space-separated list containerExcludeLogs: - # datadog.containerIncludeLogs -- Include logs in the Agent Autodiscovery, as - # a space-separated list + # datadog.containerIncludeLogs -- Include logs in Agent Autodiscovery, as a space-separated list containerIncludeLogs: - # datadog.containerExcludeMetrics -- Exclude metrics from the Agent - # Autodiscovery, as a space-separated list + # datadog.containerExcludeMetrics -- Exclude metrics from Agent Autodiscovery, as a space-separated list containerExcludeMetrics: - # datadog.containerIncludeMetrics -- Include metrics in the Agent - # Autodiscovery, as a space-separated list + # datadog.containerIncludeMetrics -- Include metrics in Agent Autodiscovery, as a space-separated list containerIncludeMetrics: - # datadog.excludePauseContainer -- Exclude pause containers from the Agent Autodiscovery. + # datadog.excludePauseContainer -- Exclude pause containers from Agent Autodiscovery. 
## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#pause-containers excludePauseContainer: true @@ -860,7 +854,7 @@ clusterAgent: name: cluster-agent # clusterAgent.image.tag -- Cluster Agent image tag to use - tag: 7.48.1 + tag: 7.49.0 # clusterAgent.image.digest -- Cluster Agent image digest to use, takes precedence over tag if specified digest: "" @@ -1199,19 +1193,21 @@ existingClusterAgent: # fips is used to enable the fips sidecar container for GOVCLOUD environments. fips: - + # fips.enabled -- Enable fips sidecar enabled: false # TODO: Option to override config of the FIPS side car: /etc/datadog-fips-proxy/datadog-fips-proxy.cfg # customConfig: false - # fips.port specifies which port is used by the containers to communicate to the FIPS sidecar. + # fips.port -- Specifies which port is used by the containers to communicate to the FIPS sidecar. port: 9803 - # fips.portRange specifies the number of ports used, defaults to 13 https://github.com/DataDog/datadog-agent/blob/7.44.x/pkg/config/config.go#L1564-L1577 + # fips.portRange -- Specifies the number of ports used, defaults to 13 https://github.com/DataDog/datadog-agent/blob/7.44.x/pkg/config/config.go#L1564-L1577 portRange: 15 + # fips.use_https -- Option to enable https use_https: false + # fips.resources -- Resource requests and limits for the FIPS sidecar container. resources: {} # limits: @@ -1221,7 +1217,9 @@ fips: # cpu: 20m # memory: 64Mi + # fips.local_address -- Set local IP address local_address: "127.0.0.1" + ## Define the Datadog image to work with image: ## fips.image.name -- Define the FIPS sidecar container image name. @@ -1271,7 +1269,7 @@ agents: name: agent # agents.image.tag -- Define the Agent version to use - tag: 7.48.1 + tag: 7.49.0 # agents.image.digest -- Define Agent image digest to use, takes precedence over tag if specified digest: "" @@ -1739,7 +1737,7 @@ clusterChecksRunner: name: agent # clusterChecksRunner.image.tag -- Define the Agent version to use - tag: 7.48.1 + tag: 7.49.0 # clusterChecksRunner.image.digest -- Define Agent image digest to use, takes precedence over tag if specified digest: "" @@ -1972,14 +1970,6 @@ kube-state-metrics: nodeSelector: kubernetes.io/os: linux - # # kube-state-metrics.image -- Override default image information for the kube-state-metrics container. - # image: - # # kube-state-metrics.repository -- Override default image registry for the kube-state-metrics container. - # repository: k8s.gcr.io/kube-state-metrics/kube-state-metrics - # # kube-state-metrics.tag -- Override default image tag for the kube-state-metrics container. - # tag: v1.9.8 - # # kube-state-metrics.pullPolicy -- Override default image pullPolicy for the kube-state-metrics container. - # pullPolicy: IfNotPresent providers: gke: # providers.gke.autopilot -- Enables Datadog Agent deployment on GKE Autopilot @@ -1993,15 +1983,15 @@ providers: # providers.eks.ec2.useHostnameFromFile -- Use hostname from EC2 filesystem instead of fetching from metadata endpoint. ## When deploying to EC2-backed EKS infrastructure, there are situations where the - ## IMDS metadata endpoint is not accesible to containers. This flag mounts the host's + ## IMDS metadata endpoint is not accessible to containers. This flag mounts the host's ## `/var/lib/cloud/data/instance-id` and uses that for Agent's hostname instead. useHostnameFromFile: false aks: - # providers.aks.enabled -- Activate all specifities related to AKS configuration. Required as currently we cannot auto-detect AKS. 
+ # providers.aks.enabled -- Activate all specificities related to AKS configuration. Required as currently we cannot auto-detect AKS. enabled: false remoteConfiguration: # remoteConfiguration.enabled -- Set to true to enable remote configuration on the Cluster Agent (if set) and the node agent. - # Can be overriden if `datadog.remoteConfiguration.enabled` or `clusterAgent.admissionController.remoteInstrumentation.enabled` is set to `false`. + # Can be overridden if `datadog.remoteConfiguration.enabled` or `clusterAgent.admissionController.remoteInstrumentation.enabled` is set to `false`. # Preferred way to enable Remote Configuration. enabled: true diff --git a/charts/f5/nginx-ingress/Chart.yaml b/charts/f5/nginx-ingress/Chart.yaml index 5ac66f306..43a7a13c6 100644 --- a/charts/f5/nginx-ingress/Chart.yaml +++ b/charts/f5/nginx-ingress/Chart.yaml @@ -4,10 +4,10 @@ annotations: catalog.cattle.io/kube-version: '>= 1.22.0-0' catalog.cattle.io/release-name: nginx-ingress apiVersion: v2 -appVersion: 3.3.1 +appVersion: 3.3.2 description: NGINX Ingress Controller home: https://github.com/nginxinc/kubernetes-ingress -icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.1/deployments/helm-chart/chart-icon.png +icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deployments/helm-chart/chart-icon.png keywords: - ingress - nginx @@ -17,6 +17,6 @@ maintainers: name: nginxinc name: nginx-ingress sources: -- https://github.com/nginxinc/kubernetes-ingress/tree/v3.3.1/deployments/helm-chart +- https://github.com/nginxinc/kubernetes-ingress/tree/v3.3.2/deployments/helm-chart type: application -version: 1.0.1 +version: 1.0.2 diff --git a/charts/f5/nginx-ingress/README.md b/charts/f5/nginx-ingress/README.md index 4535b7a3d..95115a070 100644 --- a/charts/f5/nginx-ingress/README.md +++ b/charts/f5/nginx-ingress/README.md @@ -78,14 +78,14 @@ To install the chart with the release name my-release (my-release is the name th For NGINX: ```console -helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.1 +helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 ``` For NGINX Plus: (assuming you have pushed the Ingress Controller image `nginx-plus-ingress` to your private registry `myregistry.example.com`) ```console -helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.1 --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true +helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true ``` This will install the latest `edge` version of the Ingress Controller from GitHub Container Registry. If you prefer to @@ -100,7 +100,7 @@ CRDs](#upgrading-the-crds). To upgrade the release `my-release`: ```console -helm upgrade my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.1 +helm upgrade my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.2 ``` ### Uninstalling the Chart @@ -141,7 +141,7 @@ upgrading/deleting the CRDs. 1. Pull the chart sources: ```console - helm pull oci://ghcr.io/nginxinc/charts/nginx-ingress --untar --version 1.0.1 + helm pull oci://ghcr.io/nginxinc/charts/nginx-ingress --untar --version 1.0.2 ``` 2. 
Change your working directory to nginx-ingress: @@ -227,7 +227,7 @@ The steps you should follow depend on the Helm release name: Selector: app=nginx-ingress-nginx-ingress ``` -2. Checkout the latest available tag using `git checkout v3.3.1` +2. Checkout the latest available tag using `git checkout v3.3.2` 3. Navigate to `/kubernates-ingress/deployments/helm-chart` @@ -279,7 +279,7 @@ reviewing its events: Selector: app=-nginx-ingress ``` -2. Checkout the latest available tag using `git checkout v3.3.1` +2. Checkout the latest available tag using `git checkout v3.3.2` 3. Navigate to `/kubernates-ingress/deployments/helm-chart` @@ -345,7 +345,7 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |`controller.logLevel` | The log level of the Ingress Controller. | 1 | |`controller.image.digest` | The image digest of the Ingress Controller. | None | |`controller.image.repository` | The image repository of the Ingress Controller. | nginx/nginx-ingress | -|`controller.image.tag` | The tag of the Ingress Controller image. | 3.3.1 | +|`controller.image.tag` | The tag of the Ingress Controller image. | 3.3.2 | |`controller.image.pullPolicy` | The pull policy for the Ingress Controller image. | IfNotPresent | |`controller.lifecycle` | The lifecycle of the Ingress Controller pods. | {} | |`controller.customConfigMap` | The name of the custom ConfigMap used by the Ingress Controller. If set, then the default config is ignored. | "" | @@ -372,7 +372,7 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont |`controller.resources` | The resources of the Ingress Controller pods. | requests: cpu=100m,memory=128Mi | |`controller.replicaCount` | The number of replicas of the Ingress Controller deployment. | 1 | |`controller.ingressClass.name` | A class of the Ingress Controller. An IngressClass resource with the name equal to the class must be deployed. Otherwise, the Ingress Controller will fail to start. The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class. The Ingress Controller processes all the VirtualServer/VirtualServerRoute/TransportServer resources that do not have the "ingressClassName" field for all versions of Kubernetes. | nginx | -|`controller.ingressClass.create` | Creates a new IngressClass object with the name `controller.ingressClass.name`. Set to `false` to use an existing ingressClass created using `kubectl` with the same name. If you use `helm upgrade`, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.3.1, do not set the value to false. | true | +|`controller.ingressClass.create` | Creates a new IngressClass object with the name `controller.ingressClass.name`. Set to `false` to use an existing ingressClass created using `kubectl` with the same name. If you use `helm upgrade`, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.3.2, do not set the value to false. | true | |`controller.ingressClass.setAsDefaultIngress` | New Ingresses without an `"ingressClassName"` field specified will be assigned the class specified in `controller.ingressClass.name`. Requires `controller.ingressClass.create`. | false | |`controller.watchNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources. 
By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchNamespace="default\,nginx-ingress"`. | "" | |`controller.watchNamespaceLabel` | Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespace`. | "" | diff --git a/charts/f5/nginx-ingress/values-icp.yaml b/charts/f5/nginx-ingress/values-icp.yaml index a48460b6a..7c1025ba6 100644 --- a/charts/f5/nginx-ingress/values-icp.yaml +++ b/charts/f5/nginx-ingress/values-icp.yaml @@ -4,7 +4,7 @@ controller: nginxplus: true image: repository: mycluster.icp:8500/kube-system/nginx-plus-ingress - tag: "3.3.1" + tag: "3.3.2" nodeSelector: beta.kubernetes.io/arch: "amd64" proxy: true diff --git a/charts/f5/nginx-ingress/values-plus.yaml b/charts/f5/nginx-ingress/values-plus.yaml index 8f488af85..072e265b0 100644 --- a/charts/f5/nginx-ingress/values-plus.yaml +++ b/charts/f5/nginx-ingress/values-plus.yaml @@ -3,4 +3,4 @@ controller: nginxplus: true image: repository: nginx-plus-ingress - tag: "3.3.1" + tag: "3.3.2" diff --git a/charts/f5/nginx-ingress/values.yaml b/charts/f5/nginx-ingress/values.yaml index 781742e97..352497d95 100644 --- a/charts/f5/nginx-ingress/values.yaml +++ b/charts/f5/nginx-ingress/values.yaml @@ -75,7 +75,7 @@ controller: repository: nginx/nginx-ingress ## The tag of the Ingress Controller image. If not specified the appVersion from Chart.yaml is used as a tag. - # tag: "3.3.1" + # tag: "3.3.2" ## The digest of the Ingress Controller image. ## If digest is specified it has precedence over tag and will be used instead @@ -232,7 +232,7 @@ controller: ## The Ingress Controller processes all the resources that do not have the "ingressClassName" field for all versions of kubernetes. name: nginx - ## Creates a new IngressClass object with the name "controller.ingressClass.name". Set to false to use an existing IngressClass with the same name. If you use helm upgrade, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.3.1, do not set the value to false. + ## Creates a new IngressClass object with the name "controller.ingressClass.name". Set to false to use an existing IngressClass with the same name. If you use helm upgrade, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.3.2, do not set the value to false. create: true ## New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`. Requires "controller.ingressClass.create". 
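The nginx-ingress changes above bump the chart to 1.0.2 and the controller image to 3.3.2, with `controller.image.tag` left commented out in `values.yaml` so the appVersion is used by default. A minimal sketch of an explicit override — assuming a hypothetical private mirror in place of the upstream repository, in the spirit of the README's NGINX Plus example — might look like:

```yaml
# Hypothetical values-override.yaml for nginx-ingress 1.0.2 (sketch only).
controller:
  image:
    # Upstream repository; swap in a private mirror here if required.
    repository: nginx/nginx-ingress
    # Explicit pin; if omitted, the chart falls back to appVersion 3.3.2.
    tag: "3.3.2"
  ingressClass:
    # Per the README above, do not switch create to false when upgrading from
    # an earlier release, or Helm may delete the IngressClass it manages.
    name: nginx
    create: true
```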
diff --git a/charts/haproxy/haproxy/Chart.yaml b/charts/haproxy/haproxy/Chart.yaml index 9c3cc9da3..1e46e96fd 100644 --- a/charts/haproxy/haproxy/Chart.yaml +++ b/charts/haproxy/haproxy/Chart.yaml @@ -1,12 +1,13 @@ annotations: artifacthub.io/changes: | - - Use Ingress Controller 1.10.8 version for base image + - Use Ingress Controller 1.10.9 version for base image + - Set allowPrivilegeEscalation to false by default catalog.cattle.io/certified: partner catalog.cattle.io/display-name: HAProxy Kubernetes Ingress Controller catalog.cattle.io/kube-version: '>=1.22.0-0' catalog.cattle.io/release-name: haproxy apiVersion: v2 -appVersion: 1.10.8 +appVersion: 1.10.9 description: A Helm chart for HAProxy Kubernetes Ingress Controller home: https://github.com/haproxytech/helm-charts/tree/main/kubernetes-ingress icon: https://raw.githubusercontent.com/haproxytech/helm-charts/main/kubernetes-ingress/chart-icon.png @@ -21,4 +22,4 @@ name: haproxy sources: - https://github.com/haproxytech/kubernetes-ingress type: application -version: 1.33.1 +version: 1.34.0 diff --git a/charts/haproxy/haproxy/templates/controller-daemonset.yaml b/charts/haproxy/haproxy/templates/controller-daemonset.yaml index 5057dd8aa..7d88e8d03 100644 --- a/charts/haproxy/haproxy/templates/controller-daemonset.yaml +++ b/charts/haproxy/haproxy/templates/controller-daemonset.yaml @@ -123,7 +123,7 @@ spec: runAsNonRoot: true runAsUser: 1000 runAsGroup: 1000 - allowPrivilegeEscalation: true + allowPrivilegeEscalation: {{ .Values.controller.allowPrivilegeEscalation }} capabilities: drop: - ALL diff --git a/charts/haproxy/haproxy/templates/controller-deployment.yaml b/charts/haproxy/haproxy/templates/controller-deployment.yaml index be9a44117..03c78de2d 100644 --- a/charts/haproxy/haproxy/templates/controller-deployment.yaml +++ b/charts/haproxy/haproxy/templates/controller-deployment.yaml @@ -124,7 +124,7 @@ spec: runAsNonRoot: true runAsUser: 1000 runAsGroup: 1000 - allowPrivilegeEscalation: true + allowPrivilegeEscalation: {{ .Values.controller.allowPrivilegeEscalation }} capabilities: drop: - ALL diff --git a/charts/haproxy/haproxy/templates/controller-podsecuritypolicy.yaml b/charts/haproxy/haproxy/templates/controller-podsecuritypolicy.yaml index 317ee78cc..055120d92 100644 --- a/charts/haproxy/haproxy/templates/controller-podsecuritypolicy.yaml +++ b/charts/haproxy/haproxy/templates/controller-podsecuritypolicy.yaml @@ -43,7 +43,7 @@ metadata: seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' spec: - allowPrivilegeEscalation: true + allowPrivilegeEscalation: {{ .Values.controller.allowPrivilegeEscalation }} allowedCapabilities: - NET_BIND_SERVICE defaultAllowPrivilegeEscalation: false diff --git a/charts/haproxy/haproxy/values.yaml b/charts/haproxy/haproxy/values.yaml index 490c71e8f..d474af085 100644 --- a/charts/haproxy/haproxy/values.yaml +++ b/charts/haproxy/haproxy/values.yaml @@ -81,6 +81,10 @@ controller: ## ref: https://kubernetes.io/docs/tutorials/security/seccomp/ enableRuntimeDefaultSeccompProfile: true + ## Privilege escalation + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + allowPrivilegeEscalation: false + ## Init Containers ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ initContainers: [] @@ -107,6 +111,7 @@ controller: ## Controller Container listener port configuration ## ref: 
https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/ + ## Note: If binding to privileged ports, allowPrivilegeEscalation will be required for NET_BIND_SERVICE to apply containerPort: http: 8080 https: 8443 diff --git a/charts/harbor/harbor/Chart.yaml b/charts/harbor/harbor/Chart.yaml index 6b093bb1a..1203998e1 100644 --- a/charts/harbor/harbor/Chart.yaml +++ b/charts/harbor/harbor/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.20-0' catalog.cattle.io/release-name: harbor apiVersion: v1 -appVersion: 2.9.0 +appVersion: 2.9.1 description: An open source trusted cloud native registry that stores, signs, and scans content home: https://goharbor.io @@ -24,4 +24,4 @@ name: harbor sources: - https://github.com/goharbor/harbor - https://github.com/goharbor/harbor-helm -version: 1.13.0 +version: 1.13.1 diff --git a/charts/harbor/harbor/templates/_helpers.tpl b/charts/harbor/harbor/templates/_helpers.tpl index 95a28a6c5..6ee24fee8 100644 --- a/charts/harbor/harbor/templates/_helpers.tpl +++ b/charts/harbor/harbor/templates/_helpers.tpl @@ -144,12 +144,26 @@ app: "{{ template "harbor.name" . }}" {{- end }} {{- end -}} + +{{- define "harbor.redis.pwdfromsecret" -}} + {{- (lookup "v1" "Secret" .Release.Namespace (.Values.redis.external.existingSecret)).data.REDIS_PASSWORD | b64dec }} +{{- end -}} + +{{- define "harbor.redis.cred" -}} + {{- with .Values.redis }} + {{- if (and (eq .type "external" ) (.external.existingSecret)) }} + {{- printf ":%s@" (include "harbor.redis.pwdfromsecret" $) }} + {{- else }} + {{- ternary (printf "%s:%s@" (.external.username | urlquery) (.external.password | urlquery)) "" (and (eq .type "external" ) (not (not .external.password))) }} + {{- end }} + {{- end }} +{{- end -}} + /*scheme://[:password@]host:port[/master_set]*/ {{- define "harbor.redis.url" -}} {{- with .Values.redis }} {{- $path := ternary "" (printf "/%s" (include "harbor.redis.masterSet" $)) (not (include "harbor.redis.masterSet" $)) }} - {{- $cred := ternary (printf "%s:%s@" (.external.username | urlquery) (.external.password | urlquery)) "" (and (eq .type "external" ) (not (not .external.password))) }} - {{- printf "%s://%s%s%s" (include "harbor.redis.scheme" $) $cred (include "harbor.redis.addr" $) $path -}} + {{- printf "%s://%s%s%s" (include "harbor.redis.scheme" $) (include "harbor.redis.cred" $) (include "harbor.redis.addr" $) $path -}} {{- end }} {{- end -}} diff --git a/charts/harbor/harbor/templates/registry/registry-dpl.yaml b/charts/harbor/harbor/templates/registry/registry-dpl.yaml index fddba9fa8..b9c97ff89 100644 --- a/charts/harbor/harbor/templates/registry/registry-dpl.yaml +++ b/charts/harbor/harbor/templates/registry/registry-dpl.yaml @@ -113,6 +113,13 @@ spec: - name: INTERNAL_TLS_TRUST_CA_PATH value: /etc/harbor/ssl/registry/ca.crt {{- end }} + {{- if .Values.redis.external.existingSecret }} + - name: REGISTRY_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.redis.external.existingSecret }} + key: REDIS_PASSWORD + {{- end }} {{- if .Values.persistence.imageChartStorage.azure.existingSecret }} - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY valueFrom: diff --git a/charts/harbor/harbor/values.yaml b/charts/harbor/harbor/values.yaml index 1160c94eb..3da298bc4 100644 --- a/charts/harbor/harbor/values.yaml +++ b/charts/harbor/harbor/values.yaml @@ -60,6 +60,7 @@ expose: # The service port Harbor listens on when serving HTTP httpPort: 80 # The service port Harbor listens on when serving HTTPS + httpsPort: 443 nodePort: # The 
name of NodePort service name: harbor @@ -378,7 +379,7 @@ enableMigrateHelmHook: false nginx: image: repository: goharbor/nginx-photon - tag: v2.9.0 + tag: v2.9.1 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -409,7 +410,7 @@ nginx: portal: image: repository: goharbor/harbor-portal - tag: v2.9.0 + tag: v2.9.1 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -440,7 +441,7 @@ portal: core: image: repository: goharbor/harbor-core - tag: v2.9.0 + tag: v2.9.1 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -512,7 +513,7 @@ core: jobservice: image: repository: goharbor/harbor-jobservice - tag: v2.9.0 + tag: v2.9.1 replicas: 1 revisionHistoryLimit: 10 # set the service account to be used, default if left empty @@ -569,7 +570,7 @@ registry: registry: image: repository: goharbor/registry-photon - tag: v2.9.0 + tag: v2.9.1 # resources: # requests: # memory: 256Mi @@ -578,7 +579,7 @@ registry: controller: image: repository: goharbor/harbor-registryctl - tag: v2.9.0 + tag: v2.9.1 # resources: # requests: @@ -644,7 +645,7 @@ trivy: # repository the repository for Trivy adapter image repository: goharbor/trivy-adapter-photon # tag the tag for Trivy adapter image - tag: v2.9.0 + tag: v2.9.1 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -730,7 +731,7 @@ database: automountServiceAccountToken: false image: repository: goharbor/harbor-db - tag: v2.9.0 + tag: v2.9.1 # The initial superuser password for internal database password: "changeit" # The size limit for Shared memory, pgSQL use it for shared_buffer @@ -803,7 +804,7 @@ redis: automountServiceAccountToken: false image: repository: goharbor/redis-photon - tag: v2.9.0 + tag: v2.9.1 # resources: # requests: # memory: 256Mi @@ -867,7 +868,7 @@ exporter: automountServiceAccountToken: false image: repository: goharbor/harbor-exporter - tag: v2.9.0 + tag: v2.9.1 nodeSelector: {} tolerations: [] affinity: {} diff --git a/charts/hashicorp/consul/Chart.yaml b/charts/hashicorp/consul/Chart.yaml index 0910b34da..7f6e75477 100644 --- a/charts/hashicorp/consul/Chart.yaml +++ b/charts/hashicorp/consul/Chart.yaml @@ -1,13 +1,13 @@ annotations: artifacthub.io/images: | - name: consul - image: hashicorp/consul:1.16.2 + image: hashicorp/consul:1.16.3 - name: consul-k8s-control-plane - image: hashicorp/consul-k8s-control-plane:1.2.2 + image: hashicorp/consul-k8s-control-plane:1.2.3 - name: consul-dataplane - image: hashicorp/consul-dataplane:1.2.2 + image: hashicorp/consul-dataplane:1.2.3 - name: envoy - image: envoyproxy/envoy:v1.25.9 + image: envoyproxy/envoy:v1.25.11 artifacthub.io/license: MPL-2.0 artifacthub.io/links: | - name: Documentation @@ -25,7 +25,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.22.0-0' catalog.cattle.io/release-name: consul apiVersion: v2 -appVersion: 1.16.2 +appVersion: 1.16.3 description: Official HashiCorp Consul Chart home: https://www.consul.io icon: https://raw.githubusercontent.com/hashicorp/consul-k8s/main/assets/icon.png @@ -34,4 +34,4 @@ name: consul sources: - https://github.com/hashicorp/consul - https://github.com/hashicorp/consul-k8s -version: 1.2.2 +version: 1.2.3 diff --git a/charts/hashicorp/consul/templates/crd-controlplanerequestlimits.yaml b/charts/hashicorp/consul/templates/crd-controlplanerequestlimits.yaml index 
01722c0cf..326445e05 100644 --- a/charts/hashicorp/consul/templates/crd-controlplanerequestlimits.yaml +++ b/charts/hashicorp/consul/templates/crd-controlplanerequestlimits.yaml @@ -121,7 +121,7 @@ spec: type: object mode: type: string - perparedQuery: + preparedQuery: properties: readRate: type: number diff --git a/charts/hashicorp/consul/values.yaml b/charts/hashicorp/consul/values.yaml index e3bbfc181..caffadc6d 100644 --- a/charts/hashicorp/consul/values.yaml +++ b/charts/hashicorp/consul/values.yaml @@ -66,7 +66,7 @@ global: # image: "hashicorp/consul-enterprise:1.10.0-ent" # ``` # @default: hashicorp/consul: - image: hashicorp/consul:1.16.2 + image: hashicorp/consul:1.16.3 # Array of objects containing image pull secret names that will be applied to each service account. # This can be used to reference image pull secrets if using a custom consul or consul-k8s-control-plane Docker image. @@ -86,7 +86,7 @@ global: # image that is used for functionality such as catalog sync. # This can be overridden per component. # @default: hashicorp/consul-k8s-control-plane: - imageK8S: hashicorp/consul-k8s-control-plane:1.2.2 + imageK8S: hashicorp/consul-k8s-control-plane:1.2.3 # The name of the datacenter that the agents should # register as. This can't be changed once the Consul cluster is up and running @@ -639,7 +639,7 @@ global: # The name (and tag) of the consul-dataplane Docker image used for the # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. # @default: hashicorp/consul-dataplane: - imageConsulDataplane: hashicorp/consul-dataplane:1.2.2 + imageConsulDataplane: hashicorp/consul-dataplane:1.2.3 # Configuration for running this Helm chart on the Red Hat OpenShift platform. # This Helm chart currently supports OpenShift v4.x+. @@ -3215,7 +3215,7 @@ terminatingGateways: gateways: - name: terminating-gateway -# [DEPRECATED] Use connectInject.apiGateway instead. This stanza will be removed with the release of Consul 1.17 +# [DEPRECATED] Use connectInject.apiGateway instead. # Configuration settings for the Consul API Gateway integration apiGateway: # When true the helm chart will install the Consul API Gateway controller @@ -3230,7 +3230,7 @@ apiGateway: # The name (and tag) of the Envoy Docker image used for the # apiGateway. For other Consul compoenents, imageEnvoy has been replaced with Consul Dataplane. # @default: envoyproxy/envoy: - imageEnvoy: "envoyproxy/envoy:v1.25.9" + imageEnvoy: "envoyproxy/envoy:v1.25.11" # Override global log verbosity level for api-gateway-controller pods. One of "debug", "info", "warn", or "error". # @type: string @@ -3430,7 +3430,7 @@ telemetryCollector: # The name of the Docker image (including any tag) for the containers running # the consul-telemetry-collector # @type: string - image: "hashicorp/consul-telemetry-collector:0.0.1" + image: "hashicorp/consul-telemetry-collector:0.0.2" # The resource settings for consul-telemetry-collector pods. 
# @recurse: false diff --git a/charts/kasten/k10/Chart.lock b/charts/kasten/k10/Chart.lock index 9ccc62aa9..612bfde66 100644 --- a/charts/kasten/k10/Chart.lock +++ b/charts/kasten/k10/Chart.lock @@ -5,5 +5,5 @@ dependencies: - name: prometheus repository: "" version: 23.3.0 -digest: sha256:742c8bb60a7bdc54588a1823848e117fe9498fb841eb11270f486a297534997c -generated: "2023-10-25T10:10:45.774911186Z" +digest: sha256:b557368766b7257191f31381f97e0d29f97e8d97104a74316d63515f48349ad4 +generated: "2023-11-04T06:31:38.895988478Z" diff --git a/charts/kasten/k10/Chart.yaml b/charts/kasten/k10/Chart.yaml index 56995bba9..bc0ea6c3a 100644 --- a/charts/kasten/k10/Chart.yaml +++ b/charts/kasten/k10/Chart.yaml @@ -4,12 +4,14 @@ annotations: catalog.cattle.io/kube-version: '>= 1.17.0-0' catalog.cattle.io/release-name: k10 apiVersion: v2 -appVersion: 6.0.11 +appVersion: 6.0.12 dependencies: -- name: grafana +- condition: grafana.enabled + name: grafana repository: file://./charts/grafana version: 6.60.6 -- name: prometheus +- condition: prometheus.server.enabled + name: prometheus repository: file://./charts/prometheus version: 23.3.0 description: Kasten’s K10 Data Management Platform @@ -19,4 +21,4 @@ maintainers: - email: contact@kasten.io name: kastenIO name: k10 -version: 6.0.1101 +version: 6.0.1201 diff --git a/charts/kasten/k10/README.md b/charts/kasten/k10/README.md index d8d6b3a79..94cf65098 100644 --- a/charts/kasten/k10/README.md +++ b/charts/kasten/k10/README.md @@ -220,7 +220,6 @@ Parameter | Description | Default `route.tls.enabled` | Configures a TLS use for `route.host` | `false` `route.tls.insecureEdgeTerminationPolicy` | Specifies behavior for insecure scheme traffic | `Redirect` `route.tls.termination` | Specifies the TLS termination of the route | `edge` -`restore.copyImagePullSecrets` | Configures if `imagePullSecret` should be copied to application namespace during restore process | `true` `apigateway.serviceResolver` | Specifies the resolver used for service discovery in the API gateway (`dns` or `endpoint`) | `dns` `limiter.concurrentSnapConversions` | Limit of concurrent snapshots to convert during export | `3` `limiter.genericVolumeSnapshots` | Limit of concurrent generic volume snapshot create operations | `10` @@ -243,8 +242,7 @@ Parameter | Description | Default `encryption.primaryKey.awsCmkKeyId` | Specifies the AWS CMK key ID for encrypting K10 Primary Key | `None` `garbagecollector.daemonPeriod` | Sets garbage collection period (in seconds) | `21600` `garbagecollector.keepMaxActions` | Sets maximum actions to keep | `1000` -`garbagecollector.importRunActions.enabled` | Enables ``importRunActions`` collector | `false` -`garbagecollector.retireActions.enabled` | Enables ``retireActions`` collector | `false` +`garbagecollector.actions.enabled` | Enables action collectors | `false` `kubeVirtVMs.snapshot.unfreezeTimeout` | Defines the time duration within which the VMs must be unfrozen while backing them up. To know more about format [go doc](https://pkg.go.dev/time#ParseDuration) can be followed | `5m` `excludedApps` | Specifies a list of applications to be excluded from the dashboard & compliance considerations. 
Format should be a :ref:`YAML array` | `["kube-system", "kube-ingress", "kube-node-lease", "kube-public", "kube-rook-ceph"]` `kanisterPodMetricSidecar.enabled` | Enable the sidecar container to gather metrics from ephemeral pods | `true` diff --git a/charts/kasten/k10/charts/grafana/templates/clusterrole.yaml b/charts/kasten/k10/charts/grafana/templates/clusterrole.yaml index 58989c205..3af4b62b6 100644 --- a/charts/kasten/k10/charts/grafana/templates/clusterrole.yaml +++ b/charts/kasten/k10/charts/grafana/templates/clusterrole.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -24,4 +23,3 @@ rules: rules: [] {{- end}} {{- end}} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/clusterrolebinding.yaml b/charts/kasten/k10/charts/grafana/templates/clusterrolebinding.yaml index aacf82606..bda9431a2 100644 --- a/charts/kasten/k10/charts/grafana/templates/clusterrolebinding.yaml +++ b/charts/kasten/k10/charts/grafana/templates/clusterrolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 @@ -23,4 +22,3 @@ roleRef: {{- end }} apiGroup: rbac.authorization.k8s.io {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/configmap-dashboard-provider.yaml b/charts/kasten/k10/charts/grafana/templates/configmap-dashboard-provider.yaml index 102d1da54..1f706a8bb 100644 --- a/charts/kasten/k10/charts/grafana/templates/configmap-dashboard-provider.yaml +++ b/charts/kasten/k10/charts/grafana/templates/configmap-dashboard-provider.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} apiVersion: v1 kind: ConfigMap @@ -28,4 +27,3 @@ data: foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/configmap.yaml b/charts/kasten/k10/charts/grafana/templates/configmap.yaml index 1808d96c4..7b837d90b 100644 --- a/charts/kasten/k10/charts/grafana/templates/configmap.yaml +++ b/charts/kasten/k10/charts/grafana/templates/configmap.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.createConfigmap }} {{- $files := .Files }} {{- $root := . 
-}} @@ -143,4 +142,3 @@ data: {{- end }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/dashboards-json-configmap.yaml b/charts/kasten/k10/charts/grafana/templates/dashboards-json-configmap.yaml index e431b3bb7..b96ce7202 100644 --- a/charts/kasten/k10/charts/grafana/templates/dashboards-json-configmap.yaml +++ b/charts/kasten/k10/charts/grafana/templates/dashboards-json-configmap.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.dashboards }} {{ $files := .Files }} {{- range $provider, $dashboards := .Values.dashboards }} @@ -37,4 +36,3 @@ data: {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/deployment.yaml b/charts/kasten/k10/charts/grafana/templates/deployment.yaml index 513ab6bf0..bfa26bb40 100644 --- a/charts/kasten/k10/charts/grafana/templates/deployment.yaml +++ b/charts/kasten/k10/charts/grafana/templates/deployment.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} apiVersion: apps/v1 kind: Deployment @@ -50,4 +49,3 @@ spec: spec: {{- include "grafana.pod" . | nindent 6 }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/extra-manifests.yaml b/charts/kasten/k10/charts/grafana/templates/extra-manifests.yaml index 39e0d02b2..a9bb3b6ba 100644 --- a/charts/kasten/k10/charts/grafana/templates/extra-manifests.yaml +++ b/charts/kasten/k10/charts/grafana/templates/extra-manifests.yaml @@ -1,6 +1,4 @@ -{{- if .Values.enabled -}} {{ range .Values.extraObjects }} --- {{ tpl (toYaml .) $ }} {{ end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/headless-service.yaml b/charts/kasten/k10/charts/grafana/templates/headless-service.yaml index ef3b1bcb8..3028589d3 100644 --- a/charts/kasten/k10/charts/grafana/templates/headless-service.yaml +++ b/charts/kasten/k10/charts/grafana/templates/headless-service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- $sts := list "sts" "StatefulSet" "statefulset" -}} {{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} apiVersion: v1 @@ -21,4 +20,3 @@ spec: - name: {{ .Values.gossipPortName }}-tcp port: 9094 {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/hpa.yaml b/charts/kasten/k10/charts/grafana/templates/hpa.yaml index 2d6556f54..46bbcb49a 100644 --- a/charts/kasten/k10/charts/grafana/templates/hpa.yaml +++ b/charts/kasten/k10/charts/grafana/templates/hpa.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- $sts := list "sts" "StatefulSet" "statefulset" -}} {{- if .Values.autoscaling.enabled }} apiVersion: {{ include "grafana.hpa.apiVersion" . }} @@ -51,4 +50,3 @@ spec: behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/image-renderer-deployment.yaml b/charts/kasten/k10/charts/grafana/templates/image-renderer-deployment.yaml index abd9a1d9b..93d20e8e7 100644 --- a/charts/kasten/k10/charts/grafana/templates/image-renderer-deployment.yaml +++ b/charts/kasten/k10/charts/grafana/templates/image-renderer-deployment.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{ if .Values.imageRenderer.enabled }} {{- $root := . 
-}} apiVersion: apps/v1 @@ -129,4 +128,3 @@ spec: - name: image-renderer-tmpfs emptyDir: {} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/image-renderer-hpa.yaml b/charts/kasten/k10/charts/grafana/templates/image-renderer-hpa.yaml index 380941656..b0f0059b7 100644 --- a/charts/kasten/k10/charts/grafana/templates/image-renderer-hpa.yaml +++ b/charts/kasten/k10/charts/grafana/templates/image-renderer-hpa.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} apiVersion: {{ include "grafana.hpa.apiVersion" . }} kind: HorizontalPodAutoscaler @@ -46,4 +45,3 @@ spec: behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/image-renderer-network-policy.yaml b/charts/kasten/k10/charts/grafana/templates/image-renderer-network-policy.yaml index aaa7fc49b..d1a0eb313 100644 --- a/charts/kasten/k10/charts/grafana/templates/image-renderer-network-policy.yaml +++ b/charts/kasten/k10/charts/grafana/templates/image-renderer-network-policy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} --- apiVersion: networking.k8s.io/v1 @@ -78,4 +77,3 @@ spec: {{- toYaml . | nindent 14 }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/image-renderer-service.yaml b/charts/kasten/k10/charts/grafana/templates/image-renderer-service.yaml index 9957705ed..f8da127cf 100644 --- a/charts/kasten/k10/charts/grafana/templates/image-renderer-service.yaml +++ b/charts/kasten/k10/charts/grafana/templates/image-renderer-service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} apiVersion: v1 kind: Service @@ -30,4 +29,3 @@ spec: selector: {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/image-renderer-servicemonitor.yaml b/charts/kasten/k10/charts/grafana/templates/image-renderer-servicemonitor.yaml index 6e1529a8b..5d9f09d26 100644 --- a/charts/kasten/k10/charts/grafana/templates/image-renderer-servicemonitor.yaml +++ b/charts/kasten/k10/charts/grafana/templates/image-renderer-servicemonitor.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.imageRenderer.serviceMonitor.enabled }} --- apiVersion: monitoring.coreos.com/v1 @@ -47,4 +46,3 @@ spec: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/ingress.yaml b/charts/kasten/k10/charts/grafana/templates/ingress.yaml index 90c3e1b65..063cdfaa5 100644 --- a/charts/kasten/k10/charts/grafana/templates/ingress.yaml +++ b/charts/kasten/k10/charts/grafana/templates/ingress.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.ingress.enabled -}} {{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} {{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) 
"true" -}} @@ -77,4 +76,3 @@ spec: {{- end }} {{- end -}} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/networkpolicy.yaml b/charts/kasten/k10/charts/grafana/templates/networkpolicy.yaml index 7036ccd40..ea4578bec 100644 --- a/charts/kasten/k10/charts/grafana/templates/networkpolicy.yaml +++ b/charts/kasten/k10/charts/grafana/templates/networkpolicy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.networkPolicy.enabled }} apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -51,4 +50,3 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/poddisruptionbudget.yaml b/charts/kasten/k10/charts/grafana/templates/poddisruptionbudget.yaml index 70347096b..05251214a 100644 --- a/charts/kasten/k10/charts/grafana/templates/poddisruptionbudget.yaml +++ b/charts/kasten/k10/charts/grafana/templates/poddisruptionbudget.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.podDisruptionBudget }} apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} kind: PodDisruptionBudget @@ -21,4 +20,3 @@ spec: matchLabels: {{- include "grafana.selectorLabels" . | nindent 6 }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/podsecuritypolicy.yaml b/charts/kasten/k10/charts/grafana/templates/podsecuritypolicy.yaml index c56e005ff..eed7af95b 100644 --- a/charts/kasten/k10/charts/grafana/templates/podsecuritypolicy.yaml +++ b/charts/kasten/k10/charts/grafana/templates/podsecuritypolicy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} apiVersion: policy/v1beta1 kind: PodSecurityPolicy @@ -48,4 +47,3 @@ spec: max: 65535 readOnlyRootFilesystem: false {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/pvc.yaml b/charts/kasten/k10/charts/grafana/templates/pvc.yaml index da3514e7a..eb8f87f07 100644 --- a/charts/kasten/k10/charts/grafana/templates/pvc.yaml +++ b/charts/kasten/k10/charts/grafana/templates/pvc.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} apiVersion: v1 kind: PersistentVolumeClaim @@ -35,4 +34,3 @@ spec: {{- toYaml . | nindent 6 }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/role.yaml b/charts/kasten/k10/charts/grafana/templates/role.yaml index eedcef46f..4b5edd978 100644 --- a/charts/kasten/k10/charts/grafana/templates/role.yaml +++ b/charts/kasten/k10/charts/grafana/templates/role.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} apiVersion: rbac.authorization.k8s.io/v1 kind: Role @@ -31,4 +30,3 @@ rules: rules: [] {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/rolebinding.yaml b/charts/kasten/k10/charts/grafana/templates/rolebinding.yaml index 3e1fd766c..58f77c6b0 100644 --- a/charts/kasten/k10/charts/grafana/templates/rolebinding.yaml +++ b/charts/kasten/k10/charts/grafana/templates/rolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -24,4 +23,3 @@ subjects: name: {{ include "grafana.serviceAccountName" . }} namespace: {{ include "grafana.namespace" . 
}} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/secret-env.yaml b/charts/kasten/k10/charts/grafana/templates/secret-env.yaml index 7f5ff8328..eb14aac70 100644 --- a/charts/kasten/k10/charts/grafana/templates/secret-env.yaml +++ b/charts/kasten/k10/charts/grafana/templates/secret-env.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.envRenderSecret }} apiVersion: v1 kind: Secret @@ -13,4 +12,3 @@ data: {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/secret.yaml b/charts/kasten/k10/charts/grafana/templates/secret.yaml index bf9004030..5cbd52744 100644 --- a/charts/kasten/k10/charts/grafana/templates/secret.yaml +++ b/charts/kasten/k10/charts/grafana/templates/secret.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} apiVersion: v1 kind: Secret @@ -25,4 +24,3 @@ data: ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/service.yaml b/charts/kasten/k10/charts/grafana/templates/service.yaml index f30e71fcb..43d360b5e 100644 --- a/charts/kasten/k10/charts/grafana/templates/service.yaml +++ b/charts/kasten/k10/charts/grafana/templates/service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.service.enabled }} {{- $root := . }} apiVersion: v1 @@ -54,4 +53,3 @@ spec: selector: {{- include "grafana.selectorLabels" . | nindent 4 }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/serviceaccount.yaml b/charts/kasten/k10/charts/grafana/templates/serviceaccount.yaml index 9e6ffca92..784e71ba6 100644 --- a/charts/kasten/k10/charts/grafana/templates/serviceaccount.yaml +++ b/charts/kasten/k10/charts/grafana/templates/serviceaccount.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.serviceAccount.create }} {{- $root := . -}} apiVersion: v1 @@ -16,4 +15,3 @@ metadata: name: {{ include "grafana.serviceAccountName" . }} namespace: {{ include "grafana.namespace" . }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/servicemonitor.yaml b/charts/kasten/k10/charts/grafana/templates/servicemonitor.yaml index 17ace094c..72396828f 100644 --- a/charts/kasten/k10/charts/grafana/templates/servicemonitor.yaml +++ b/charts/kasten/k10/charts/grafana/templates/servicemonitor.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- if .Values.serviceMonitor.enabled }} --- apiVersion: monitoring.coreos.com/v1 @@ -51,4 +50,3 @@ spec: {{- toYaml . 
| nindent 4 }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/grafana/templates/statefulset.yaml b/charts/kasten/k10/charts/grafana/templates/statefulset.yaml index 69b4373f7..e6c944a4d 100644 --- a/charts/kasten/k10/charts/grafana/templates/statefulset.yaml +++ b/charts/kasten/k10/charts/grafana/templates/statefulset.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} {{- $sts := list "sts" "StatefulSet" "statefulset" -}} {{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} apiVersion: apps/v1 @@ -55,4 +54,3 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/configmap.yaml b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/configmap.yaml index ba61ab309..9ed6c02a2 100644 --- a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/configmap.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/configmap.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.config }} apiVersion: v1 kind: ConfigMap @@ -19,4 +18,3 @@ data: {{- $value | nindent 4 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/ingress.yaml b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/ingress.yaml index faefc1192..8de79d7c2 100644 --- a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/ingress.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/ingress.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.ingress.enabled }} {{- $fullName := include "alertmanager.fullname" . }} {{- $svcPort := .Values.service.port }} @@ -55,4 +54,3 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/pdb.yaml b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/pdb.yaml index c15708252..103e9ecde 100644 --- a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/pdb.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/pdb.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.podDisruptionBudget }} apiVersion: {{ include "alertmanager.pdb.apiVersion" . }} kind: PodDisruptionBudget @@ -13,4 +12,3 @@ spec: {{- include "alertmanager.selectorLabels" . | nindent 6 }} {{- toYaml .Values.podDisruptionBudget | nindent 2 }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml index 2104e2e07..bc9ccaaff 100644 --- a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/serviceaccount.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount @@ -13,4 +12,3 @@ metadata: namespace: {{ include "alertmanager.namespace" . 
}} automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/services.yaml b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/services.yaml index 7feb45f6d..ce0876c77 100644 --- a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/services.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/services.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} apiVersion: v1 kind: Service metadata: @@ -58,4 +57,3 @@ spec: {{- end }} selector: {{- include "alertmanager.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/statefulset.yaml b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/statefulset.yaml index f9630df2b..0c4733a1b 100644 --- a/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/statefulset.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/alertmanager/templates/statefulset.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- $svcClusterPort := .Values.service.clusterPort }} apiVersion: apps/v1 kind: StatefulSet @@ -223,4 +222,3 @@ spec: - name: storage emptyDir: {} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml index bb9ee933b..025cd47a8 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "cilium") }} apiVersion: cilium.io/v2 kind: CiliumNetworkPolicy @@ -32,4 +31,3 @@ spec: protocol: TCP {{ end }} {{ end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml index 84cb97ac9..cf9f628d0 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -19,4 +18,3 @@ subjects: name: {{ template "kube-state-metrics.serviceAccountName" . }} namespace: {{ template "kube-state-metrics.namespace" . 
}} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml index e91a268f4..72986a607 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.customResourceState.enabled}} apiVersion: v1 kind: ConfigMap @@ -8,4 +7,3 @@ data: config.yaml: | {{- toYaml .Values.customResourceState.config | nindent 4 }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml index bb5a5a4f8..1ee76bd59 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} apiVersion: apps/v1 {{- if .Values.autosharding.enabled }} kind: StatefulSet @@ -277,4 +276,3 @@ spec: {{ toYaml .Values.volumes | indent 8 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml index 3a29324a3..567f7bf32 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml @@ -1,6 +1,4 @@ -{{- if .Values.enabled }} {{ range .Values.extraManifests }} --- {{ tpl (toYaml .) 
$ }} {{ end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml index 08e53b6e7..6af008450 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.kubeconfig.enabled -}} apiVersion: v1 kind: Secret @@ -11,4 +10,3 @@ type: Opaque data: config: '{{ .Values.kubeconfig.secret }}' {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml index 1df6a2460..309b38ec5 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "kubernetes") }} kind: NetworkPolicy apiVersion: networking.k8s.io/v1 @@ -42,4 +41,3 @@ spec: - Ingress - Egress {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml index e06bb917e..3771b511d 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/pdb.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.podDisruptionBudget -}} {{ if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}} apiVersion: policy/v1 @@ -17,4 +16,3 @@ spec: app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} {{ toYaml .Values.podDisruptionBudget | indent 2 }} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml index 36f46ad2d..8905e113e 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} apiVersion: policy/v1beta1 kind: PodSecurityPolicy @@ -38,4 +37,3 @@ spec: max: 65535 readOnlyRootFilesystem: false {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml index d99b1503c..654e4a3d5 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -18,4 +17,3 @@ rules: resourceNames: - {{ template "kube-state-metrics.fullname" . }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml index ca6d80dfe..5b62a18bd 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -15,4 +14,3 @@ subjects: name: {{ template "kube-state-metrics.serviceAccountName" . }} namespace: {{ template "kube-state-metrics.namespace" . }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml index e4a62472e..39ed577c1 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.kubeRBACProxy.enabled}} apiVersion: v1 kind: ConfigMap @@ -15,4 +14,3 @@ data: subresource: {{ template "kube-state-metrics.fullname" . }} name: {{ template "kube-state-metrics.fullname" . 
}} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/role.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/role.yaml index 406b2ead3..d33687f2d 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/role.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/role.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} {{- range (ternary (join "," .Values.namespaces | split "," ) (list "") (eq $.Values.rbac.useClusterRole false)) }} --- @@ -211,4 +210,3 @@ rules: {{ end }} {{- end -}} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml index c301a79be..330651b73 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} {{- range (join "," $.Values.namespaces) | split "," }} --- @@ -23,4 +22,3 @@ subjects: namespace: {{ template "kube-state-metrics.namespace" $ }} {{- end -}} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/service.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/service.yaml index 4236ce036..6c486a662 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/service.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} apiVersion: v1 kind: Service metadata: @@ -48,4 +47,3 @@ spec: {{- end }} selector: {{- include "kube-state-metrics.selectorLabels" . 
| indent 4 }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml index fd3b95b0c..a7ff4dd3d 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.serviceAccount.create -}} apiVersion: v1 kind: ServiceAccount @@ -14,4 +13,3 @@ metadata: imagePullSecrets: {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml index 83c848d86..f98b3f36a 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.prometheus.monitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -106,4 +105,3 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml index 054303aa2..489de147c 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.autosharding.enabled .Values.rbac.create -}} apiVersion: rbac.authorization.k8s.io/v1 kind: Role @@ -25,4 +24,3 @@ rules: - list - watch {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml index 7ee741d77..73b37a4f6 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.autosharding.enabled .Values.rbac.create -}} apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -16,4 +15,3 @@ subjects: name: {{ template "kube-state-metrics.serviceAccountName" . }} namespace: {{ template "kube-state-metrics.namespace" . 
}} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml index d36c63fa6..f46305b51 100644 --- a/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }} apiVersion: autoscaling.k8s.io/v1 kind: VerticalPodAutoscaler @@ -43,4 +42,3 @@ spec: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml index f2cb114b3..1fd91150f 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -19,4 +18,3 @@ rules: verbs: [ "create" ] {{- end }} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml index 8f52ed7a2..653305ad9 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -19,4 +18,3 @@ subjects: name: {{ template "prometheus-node-exporter.serviceAccountName" . }} namespace: {{ template "prometheus-node-exporter.namespace" . }} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml index 55f3dafe4..c8a71add1 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} apiVersion: apps/v1 kind: DaemonSet metadata: @@ -283,4 +282,3 @@ spec: configMap: name: {{ template "prometheus-node-exporter.fullname" . 
}}-rbac-config {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml index e36e86e42..45eeb8d96 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.endpoints }} apiVersion: v1 kind: Endpoints @@ -17,4 +16,3 @@ subsets: port: 9100 protocol: TCP {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml index 3a29324a3..567f7bf32 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml @@ -1,6 +1,4 @@ -{{- if .Values.enabled }} {{ range .Values.extraManifests }} --- {{ tpl (toYaml .) $ }} {{ end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml index a1c3e5aef..825722729 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.networkPolicy.enabled }} apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -22,4 +21,3 @@ spec: matchLabels: {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml index 94b253e00..f88da6a34 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.prometheus.podMonitor.enabled }} apiVersion: {{ .Values.prometheus.podMonitor.apiVersion | default "monitoring.coreos.com/v1" }} kind: PodMonitor @@ -90,4 +89,3 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml index 4a23fe220..895731724 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -13,4 +12,3 @@ rules: resourceNames: - {{ include "prometheus-node-exporter.fullname" . 
}} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml index 307bd9aac..333370173 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -15,4 +14,3 @@ subjects: name: {{ include "prometheus-node-exporter.fullname" . }} namespace: {{ include "prometheus-node-exporter.namespace" . }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp.yaml index 1c0401ffd..4896c84da 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/psp.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} apiVersion: policy/v1beta1 kind: PodSecurityPolicy @@ -48,4 +47,3 @@ spec: max: 65535 readOnlyRootFilesystem: false {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml index 4bde18ba2..3936cbdf9 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.kubeRBACProxy.enabled}} apiVersion: v1 kind: ConfigMap @@ -15,4 +14,3 @@ data: subresource: {{ template "prometheus-node-exporter.fullname" . }} name: {{ template "prometheus-node-exporter.fullname" . }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml index 5f409dff3..068a6bc71 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} apiVersion: v1 kind: Service metadata: @@ -26,4 +25,3 @@ spec: name: {{ .Values.service.portName }} selector: {{- include "prometheus-node-exporter.selectorLabels" . 
| nindent 4 }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml index e53634c6b..5c3348c09 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and .Values.rbac.create .Values.serviceAccount.create -}} apiVersion: v1 kind: ServiceAccount @@ -16,4 +15,3 @@ imagePullSecrets: {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} {{- end }} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml index aef91f1bd..0d7a42eae 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.prometheus.monitor.enabled }} apiVersion: {{ .Values.prometheus.monitor.apiVersion | default "monitoring.coreos.com/v1" }} kind: ServiceMonitor @@ -60,4 +59,3 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml index 05d613824..2c2705f87 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }} apiVersion: autoscaling.k8s.io/v1 kind: VerticalPodAutoscaler @@ -39,4 +38,3 @@ spec: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml index bcbc63848..557ca6f00 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if not .Values.runAsStatefulSet }} apiVersion: apps/v1 kind: Deployment @@ -27,4 +26,3 @@ spec: spec: {{- include "prometheus-pushgateway.podSpec" . 
| nindent 6 }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml index 2040df7cb..237ac4a12 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.ingress.enabled }} {{- $serviceName := include "prometheus-pushgateway.fullname" . }} {{- $servicePort := .Values.service.port }} @@ -49,4 +48,3 @@ spec: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml index 332b64398..d3b8019e3 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.networkPolicy }} apiVersion: {{ include "prometheus-pushgateway.networkPolicy.apiVersion" . }} kind: NetworkPolicy @@ -25,4 +24,3 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml index dff71e2e0..6051133c6 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.podDisruptionBudget }} apiVersion: {{ include "prometheus-pushgateway.pdb.apiVersion" . }} kind: PodDisruptionBudget @@ -13,4 +12,3 @@ spec: {{- include "prometheus-pushgateway.selectorLabels" . 
| nindent 6 }} {{- toYaml .Values.podDisruptionBudget | nindent 2 }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml index 805952374..cfad8760f 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if and (not .Values.runAsStatefulSet) .Values.persistentVolume.enabled (not .Values.persistentVolume.existingClaim) }} apiVersion: v1 kind: PersistentVolumeClaim @@ -34,4 +33,3 @@ spec: requests: storage: "{{ .Values.persistentVolume.size }}" {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml index a3cde11cd..7a3562cd9 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- $stsNoHeadlessSvcTypes := list "LoadBalancer" "NodePort" -}} apiVersion: v1 kind: Service @@ -40,4 +39,3 @@ spec: name: http selector: {{- include "prometheus-pushgateway.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml index 6b9f65866..ab5e2452c 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount @@ -15,4 +14,3 @@ metadata: name: {{ include "prometheus-pushgateway.serviceAccountName" . }} namespace: {{ template "prometheus-pushgateway.namespace" . }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml index 792a2d686..5e3f75a11 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.serviceMonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -50,4 +49,3 @@ spec: matchLabels: {{- include "prometheus-pushgateway.selectorLabels" . 
| nindent 6 }} {{- end -}} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml index 0633bb953..0d52a8dc9 100644 --- a/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml +++ b/charts/kasten/k10/charts/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled }} {{- if .Values.runAsStatefulSet }} apiVersion: apps/v1 kind: StatefulSet @@ -54,4 +53,3 @@ spec: storage: "{{ .Values.persistentVolume.size }}" {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/clusterrole.yaml b/charts/kasten/k10/charts/prometheus/templates/clusterrole.yaml index 2e9ad6407..da620c0b8 100644 --- a/charts/kasten/k10/charts/prometheus/templates/clusterrole.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/clusterrole.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if and .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} apiVersion: {{ template "rbac.apiVersion" . }} kind: ClusterRole @@ -47,4 +46,3 @@ rules: verbs: - get {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/clusterrolebinding.yaml b/charts/kasten/k10/charts/prometheus/templates/clusterrolebinding.yaml index d31dafbf7..82814c305 100644 --- a/charts/kasten/k10/charts/prometheus/templates/clusterrolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/clusterrolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if and .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} apiVersion: {{ template "rbac.apiVersion" . }} kind: ClusterRoleBinding @@ -15,4 +14,3 @@ roleRef: kind: ClusterRole name: {{ template "prometheus.server.clusterrolefullname" . }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/cm.yaml b/charts/kasten/k10/charts/prometheus/templates/cm.yaml index 4ed163525..a702b527e 100644 --- a/charts/kasten/k10/charts/prometheus/templates/cm.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/cm.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if (empty .Values.server.configMapOverrideName) -}} apiVersion: v1 kind: ConfigMap @@ -94,4 +93,3 @@ data: {{- end -}} {{- end -}} {{- end -}} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/deploy.yaml b/charts/kasten/k10/charts/prometheus/templates/deploy.yaml index c7fe700f1..ccebb404c 100644 --- a/charts/kasten/k10/charts/prometheus/templates/deploy.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/deploy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if not .Values.server.statefulSet.enabled -}} apiVersion: {{ template "prometheus.deployment.apiVersion" . }} kind: Deployment @@ -346,4 +345,3 @@ spec: {{- end -}} {{- end -}} {{- end -}} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/extra-manifests.yaml b/charts/kasten/k10/charts/prometheus/templates/extra-manifests.yaml index 298416577..2b21b7106 100644 --- a/charts/kasten/k10/charts/prometheus/templates/extra-manifests.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/extra-manifests.yaml @@ -1,6 +1,4 @@ -{{- if .Values.server.enabled -}} {{ range .Values.extraManifests }} --- {{ tpl . 
$ }} {{ end }} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/headless-svc.yaml b/charts/kasten/k10/charts/prometheus/templates/headless-svc.yaml index 3d61ef166..df9db9914 100644 --- a/charts/kasten/k10/charts/prometheus/templates/headless-svc.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/headless-svc.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.server.statefulSet.enabled -}} apiVersion: v1 kind: Service @@ -34,4 +33,3 @@ spec: selector: {{- include "prometheus.server.matchLabels" . | nindent 4 }} {{- end -}} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/ingress.yaml b/charts/kasten/k10/charts/prometheus/templates/ingress.yaml index bebb858de..fc2468d8b 100644 --- a/charts/kasten/k10/charts/prometheus/templates/ingress.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/ingress.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.server.ingress.enabled -}} {{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} {{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} @@ -56,4 +55,3 @@ spec: {{ toYaml .Values.server.ingress.tls | indent 4 }} {{- end -}} {{- end -}} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/network-policy.yaml b/charts/kasten/k10/charts/prometheus/templates/network-policy.yaml index 634c860ac..3254ffc04 100644 --- a/charts/kasten/k10/charts/prometheus/templates/network-policy.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/network-policy.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.networkPolicy.enabled }} apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} kind: NetworkPolicy @@ -15,4 +14,3 @@ spec: - ports: - port: 9090 {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/pdb.yaml b/charts/kasten/k10/charts/prometheus/templates/pdb.yaml index c83cfe014..852f1bb8f 100644 --- a/charts/kasten/k10/charts/prometheus/templates/pdb.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/pdb.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.server.podDisruptionBudget.enabled }} apiVersion: {{ template "prometheus.podDisruptionBudget.apiVersion" . }} kind: PodDisruptionBudget @@ -13,4 +12,3 @@ spec: matchLabels: {{- include "prometheus.server.matchLabels" . 
| nindent 6 }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/psp.yaml b/charts/kasten/k10/charts/prometheus/templates/psp.yaml index 1054aca06..5776e2541 100644 --- a/charts/kasten/k10/charts/prometheus/templates/psp.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/psp.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} apiVersion: policy/v1beta1 @@ -52,4 +51,3 @@ spec: readOnlyRootFilesystem: false {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/pvc.yaml b/charts/kasten/k10/charts/prometheus/templates/pvc.yaml index e6d4845a8..5a30a1bce 100644 --- a/charts/kasten/k10/charts/prometheus/templates/pvc.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/pvc.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if not .Values.server.statefulSet.enabled -}} {{- if .Values.server.persistentVolume.enabled -}} {{- if not .Values.server.persistentVolume.existingClaim -}} @@ -45,4 +44,3 @@ spec: {{- end -}} {{- end -}} {{- end -}} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/rolebinding.yaml b/charts/kasten/k10/charts/prometheus/templates/rolebinding.yaml index 5231b1477..bc112a3dd 100644 --- a/charts/kasten/k10/charts/prometheus/templates/rolebinding.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/rolebinding.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if and .Values.rbac.create .Values.server.useExistingClusterRoleName .Values.server.namespaces -}} {{ range $.Values.server.namespaces -}} --- @@ -19,4 +18,3 @@ roleRef: name: {{ $.Values.server.useExistingClusterRoleName }} {{ end -}} {{ end -}} -{{ end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/service.yaml b/charts/kasten/k10/charts/prometheus/templates/service.yaml index 7738cefae..1aa384eb0 100644 --- a/charts/kasten/k10/charts/prometheus/templates/service.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/service.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.server.service.enabled -}} apiVersion: v1 kind: Service @@ -59,4 +58,3 @@ spec: {{- end }} type: "{{ .Values.server.service.type }}" {{- end -}} -{{- end -}} diff --git a/charts/kasten/k10/charts/prometheus/templates/serviceaccount.yaml b/charts/kasten/k10/charts/prometheus/templates/serviceaccount.yaml index 7d8db1729..273aa7eed 100644 --- a/charts/kasten/k10/charts/prometheus/templates/serviceaccount.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/serviceaccount.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.serviceAccounts.server.create }} apiVersion: v1 kind: ServiceAccount @@ -10,4 +9,3 @@ metadata: annotations: {{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/sts.yaml b/charts/kasten/k10/charts/prometheus/templates/sts.yaml index f42f9fe99..ac11ab2a1 100644 --- a/charts/kasten/k10/charts/prometheus/templates/sts.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/sts.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.server.statefulSet.enabled -}} apiVersion: apps/v1 kind: StatefulSet @@ -369,4 +368,3 @@ spec: {{- end -}} {{- end }} {{- end }} -{{- end }} diff --git a/charts/kasten/k10/charts/prometheus/templates/vpa.yaml b/charts/kasten/k10/charts/prometheus/templates/vpa.yaml index 
84a33c9ae..cd07ad8b7 100644 --- a/charts/kasten/k10/charts/prometheus/templates/vpa.yaml +++ b/charts/kasten/k10/charts/prometheus/templates/vpa.yaml @@ -1,4 +1,3 @@ -{{- if .Values.server.enabled -}} {{- if .Values.server.verticalAutoscaler.enabled -}} {{- if .Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler" }} apiVersion: autoscaling.k8s.io/v1 @@ -25,4 +24,3 @@ spec: resourcePolicy: containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} {{- end -}} -{{- end -}} diff --git a/charts/kasten/k10/templates/NOTES.txt b/charts/kasten/k10/templates/NOTES.txt index d97dd5d49..a5acbf846 100644 --- a/charts/kasten/k10/templates/NOTES.txt +++ b/charts/kasten/k10/templates/NOTES.txt @@ -62,3 +62,16 @@ for more information. Deprecation warning: The `auth.dex` block of values will be deprecated in favor of `auth.openshift` and `auth.ldap` in version 6.5. -------------------- {{- end }} + +{{- if .Values.restore }} +{{- if or (empty .Values.restore.copyImagePullSecrets) (.Values.restore.copyImagePullSecrets) }} +-------------------- +Removal warning: The helm field `restore.copyImagePullSecrets` has been removed in version 6.0.12. K10 no longer copies the `imagePullSecret` to the application namespace. +-------------------- +{{- end }} +{{- end }} + +{{- if or (not (empty .Values.garbagecollector.importRunActions)) (not (empty .Values.garbagecollector.backupRunActions)) (not (empty .Values.garbagecollector.retireActions)) }} +Deprecation warning: The `garbagecollector.importRunActions`, `garbagecollector.backupRunActions`, `garbagecollector.retireActions` +blocks within the helm chart values have been replaced with `garbagecollector.actions`. +{{- end }} diff --git a/charts/kasten/k10/templates/_definitions.tpl b/charts/kasten/k10/templates/_definitions.tpl index c975a3328..d7839ee3c 100644 --- a/charts/kasten/k10/templates/_definitions.tpl +++ b/charts/kasten/k10/templates/_definitions.tpl @@ -93,9 +93,7 @@ state: {{- define "k10.defaultK10LimiterProviderSnapshots" -}}10{{- end -}} {{- define "k10.defaultK10GCDaemonPeriod" -}}21600{{- end -}} {{- define "k10.defaultK10GCKeepMaxActions" -}}1000{{- end -}} -{{- define "k10.defaultK10GCBackupRunActionsEnabled" -}}false{{- end -}} -{{- define "k10.defaultK10GCImportRunActionsEnabled" -}}false{{- end -}} -{{- define "k10.defaultK10GCRetireActionsEnabled" -}}false{{- end -}} +{{- define "k10.defaultK10GCActionsEnabled" -}}false{{- end -}} {{- define "k10.defaultK10ExecutorWorkerCount" -}}8{{- end -}} {{- define "k10.defaultK10ExecutorMaxConcurrentRestoreCsiSnapshots" -}}3{{- end -}} {{- define "k10.defaultK10ExecutorMaxConcurrentRestoreGenericVolumeSnapshots" -}}3{{- end -}} @@ -210,5 +208,5 @@ state-svc: {{- define "k10.multiClusterVersion" -}}2{{- end -}} {{- define "k10.mcExternalPort" -}}18000{{- end -}} {{- define "k10.defaultKubeVirtVMsUnfreezeTimeout" -}}5m{{- end -}} -{{- define "k10.kanisterToolsImageTag" -}}0.98.0{{- end -}} +{{- define "k10.kanisterToolsImageTag" -}}0.99.0{{- end -}} {{- define "k10.disabledServicesEnvVar" -}}K10_DISABLED_SERVICES{{- end -}} diff --git a/charts/kasten/k10/templates/_helpers.tpl b/charts/kasten/k10/templates/_helpers.tpl index f4f6e940a..f94f2ec2b 100644 --- a/charts/kasten/k10/templates/_helpers.tpl +++ b/charts/kasten/k10/templates/_helpers.tpl @@ -969,6 +969,13 @@ running in the same cluster. 
-}} {{- end -}} +{{/* Fail if Ironbank is enabled and the admin image is turned on */}} +{{- define "k10.fail.ironbankPdfReports" -}} + {{- if and (include "ironbank.enabled" .) (.Values.reporting.pdfReports) -}} + {{- fail "global.ironbank.enabled and reporting.pdfReports cannot both be enabled at the same time" -}} + {{- end -}} +{{- end -}} + {{/* Fail if Ironbank is enabled and images we don't support are turned on */}} {{- define "k10.fail.ironbankRHMarketplace" -}} {{- if and (include "ironbank.enabled" .) (.Values.global.rhMarketPlace) -}} diff --git a/charts/kasten/k10/templates/_k10_container.tpl b/charts/kasten/k10/templates/_k10_container.tpl index c45ba2c0e..2aa86f9d8 100644 --- a/charts/kasten/k10/templates/_k10_container.tpl +++ b/charts/kasten/k10/templates/_k10_container.tpl @@ -232,6 +232,8 @@ stating that types are not same for the equality check - name: K10_CAPABILITIES_MASK value: {{ $capabilities_mask | quote }} {{- end }} + - name: K10_HOST_SVC + value: {{ $pod }} {{- if eq $service "controllermanager" }} - name: K10_STATEFUL value: "{{ .Values.global.persistence.enabled }}" @@ -245,11 +247,6 @@ stating that types are not same for the equality check {{- if or .Values.global.imagePullSecret (or .Values.secrets.dockerConfig .Values.secrets.dockerConfigPath) }} - name: IMAGE_PULL_SECRET_NAMES value: {{ (trimSuffix " " (include "k10.imagePullSecretNames" .)) | toJson }} - - name: COPY_IMAGE_PULL_SECRETS - valueFrom: - configMapKeyRef: - name: k10-config - key: copyImagePullSecretsWhileRestore {{- end }} {{- end }} - name: MODEL_STORE_DIR @@ -403,21 +400,11 @@ stating that types are not same for the equality check configMapKeyRef: name: k10-config key: K10GCKeepMaxActions - - name: K10_GC_BACKUP_RUN_ACTIONS_ENABLED + - name: K10_GC_ACTIONS_ENABLED valueFrom: configMapKeyRef: name: k10-config - key: K10GCBackupRunActionsEnabled - - name: K10_GC_IMPORT_RUN_ACTIONS_ENABLED - valueFrom: - configMapKeyRef: - name: k10-config - key: K10GCImportRunActionsEnabled - - name: K10_GC_RETIRE_ACTIONS_ENABLED - valueFrom: - configMapKeyRef: - name: k10-config - key: K10GCRetireActionsEnabled + key: K10GCActionsEnabled {{- end }} {{- if (eq $service "executor") }} - name: K10_EXECUTOR_WORKER_COUNT diff --git a/charts/kasten/k10/templates/_k10_image_tag.tpl b/charts/kasten/k10/templates/_k10_image_tag.tpl index 037e64871..090926829 100644 --- a/charts/kasten/k10/templates/_k10_image_tag.tpl +++ b/charts/kasten/k10/templates/_k10_image_tag.tpl @@ -1 +1 @@ -{{- define "k10.imageTag" -}}6.0.11{{- end -}} \ No newline at end of file +{{- define "k10.imageTag" -}}6.0.12{{- end -}} \ No newline at end of file diff --git a/charts/kasten/k10/templates/_k10_metering.tpl b/charts/kasten/k10/templates/_k10_metering.tpl index 60b6899df..7572b793e 100644 --- a/charts/kasten/k10/templates/_k10_metering.tpl +++ b/charts/kasten/k10/templates/_k10_metering.tpl @@ -190,6 +190,8 @@ spec: - name: K10_CAPABILITIES_MASK value: {{ $capabilities_mask | quote }} {{- end }} + - name: K10_HOST_SVC + value: {{ $service }} - name: LOG_LEVEL valueFrom: configMapKeyRef: diff --git a/charts/kasten/k10/templates/ironbank.tpl b/charts/kasten/k10/templates/ironbank.tpl index 9c0ee3529..1dd7ba1cf 100644 --- a/charts/kasten/k10/templates/ironbank.tpl +++ b/charts/kasten/k10/templates/ironbank.tpl @@ -2,6 +2,7 @@ This file is used to fail the helm deployment if certain values are set which are not compatible with an Ironbank deployment. */}} -{{- include "k10.fail.ironbankRHMarketplace" . 
-}} {{- include "k10.fail.ironbankGrafana" . -}} +{{- include "k10.fail.ironbankPdfReports" . -}} {{- include "k10.fail.ironbankPrometheus" . -}} +{{- include "k10.fail.ironbankRHMarketplace" . -}} diff --git a/charts/kasten/k10/templates/k10-config.yaml b/charts/kasten/k10/templates/k10-config.yaml index 87c6194f0..d83f48d91 100644 --- a/charts/kasten/k10/templates/k10-config.yaml +++ b/charts/kasten/k10/templates/k10-config.yaml @@ -50,16 +50,12 @@ data: K10GCDaemonPeriod: {{ default (include "k10.defaultK10GCDaemonPeriod" .) .Values.garbagecollector.daemonPeriod | quote }} K10GCKeepMaxActions: {{ default (include "k10.defaultK10GCKeepMaxActions" .) .Values.garbagecollector.keepMaxActions | quote }} - K10GCBackupRunActionsEnabled: {{ default (include "k10.defaultK10GCBackupRunActionsEnabled" .) .Values.garbagecollector.backupRunActions.enabled | quote }} - K10GCImportRunActionsEnabled: {{ default (include "k10.defaultK10GCImportRunActionsEnabled" .) .Values.garbagecollector.importRunActions.enabled | quote }} - K10GCRetireActionsEnabled: {{ default (include "k10.defaultK10GCRetireActionsEnabled" .) .Values.garbagecollector.retireActions.enabled | quote }} + K10GCActionsEnabled: {{ default (include "k10.defaultK10GCActionsEnabled" .) .Values.garbagecollector.actions.enabled | quote }} kubeVirtVMsUnFreezeTimeout: {{ default (include "k10.defaultKubeVirtVMsUnfreezeTimeout" .) .Values.kubeVirtVMs.snapshot.unfreezeTimeout | quote }} k10JobMaxWaitDuration: {{ .Values.maxJobWaitDuration | quote }} - copyImagePullSecretsWhileRestore: {{ .Values.restore.copyImagePullSecrets | quote }} - {{- if .Values.awsConfig.efsBackupVaultName }} efsBackupVaultName: {{ quote .Values.awsConfig.efsBackupVaultName }} {{- end }} diff --git a/charts/kasten/k10/templates/{values}/grafana/values/grafana_values.tpl b/charts/kasten/k10/templates/{values}/grafana/values/grafana_values.tpl index f38fd36d5..7d5c754c0 100644 --- a/charts/kasten/k10/templates/{values}/grafana/values/grafana_values.tpl +++ b/charts/kasten/k10/templates/{values}/grafana/values/grafana_values.tpl @@ -34,6 +34,7 @@ selected to ensure that it is rendered before other templates! 
*/}} +{{- if .Values.grafana.enabled }} {{- $grafana_prefix := printf "%s/grafana/" (include "k10.prefixPath" $) -}} {{- $grafana_scoped_values := (dict "Chart" (dict "Name" "grafana") "Release" .Release "Values" .Values.grafana) -}} @@ -243,3 +244,4 @@ ) | toYaml) -}} +{{- end }} diff --git a/charts/kasten/k10/values.schema.json b/charts/kasten/k10/values.schema.json index 0e4f20ee0..490bbf38a 100644 --- a/charts/kasten/k10/values.schema.json +++ b/charts/kasten/k10/values.schema.json @@ -1875,42 +1875,16 @@ "title": "Max actions to keep", "description": "Sets maximum actions to keep" }, - "importRunActions": { + "actions": { "type": "object", - "title": "importRunActions collector config", - "description": "Configure importRunActions garbage collector", + "title": "action collectors config", + "description": "Configure action garbage collectors", "properties": { "enabled": { "type": "boolean", "default": false, - "title": "Enable importRunActions collector", - "description": "Set true to enable importRunActions collector" - } - } - }, - "backupRunActions": { - "type": "object", - "title": "backupRunActions collector config", - "description": "Configure backupRunActions garbage collector", - "properties": { - "enabled": { - "type": "boolean", - "default": false, - "title": "Enable backupRunActions collector", - "description": "Set true to enable backupRunActions collector" - } - } - }, - "retireActions": { - "type": "object", - "title": "retireActions collector config", - "description": "Configure retireActions garbage collector", - "properties": { - "enabled": { - "type": "boolean", - "default": false, - "title": "Enable retireRunActions collector", - "description": "Set true to enable retireRunActions collector" + "title": "Enable action collectors", + "description": "Set true to enable action collectors" } } } @@ -2206,19 +2180,6 @@ } } }, - "restore":{ - "type": "object", - "title": "Restore related configuration", - "description": "Restore workflow related configurations", - "properties": { - "copyImagePullSecrets":{ - "type": "boolean", - "default": true, - "title": "Configures if imagePullSecret should be copied to application namespaces during the restore process", - "description": "When K10 is set up to use a private container registry, K10 by default copies the imagePullSecret from K10's namespace to the application namespace to allow the restore process to pull K10 container images. This option can be used to disable that." - } - } - }, "awsConfig": { "type": "object", "title": "AWS config", diff --git a/charts/kasten/k10/values.yaml b/charts/kasten/k10/values.yaml index 784a2c5ee..60e6652b6 100644 --- a/charts/kasten/k10/values.yaml +++ b/charts/kasten/k10/values.yaml @@ -360,11 +360,7 @@ genericVolumeSnapshot: garbagecollector: daemonPeriod: 21600 keepMaxActions: 1000 - importRunActions: - enabled: false - backupRunActions: - enabled: false - retireActions: + actions: enabled: false resources: {} @@ -417,12 +413,6 @@ kanister: podReadyWaitTimeout: 15 managedDataServicesBlueprintsEnabled: true -restore: - # K10 by default copies imagePullSecrets from K10's namespace to the namespace of the application being restored - # (required to pull the container images for the affinity and restore-data pods). This field - # can be used to disable that. 
- copyImagePullSecrets: true - awsConfig: assumeRoleDuration: "" efsBackupVaultName: "k10vault" diff --git a/charts/kong/kong/CHANGELOG.md b/charts/kong/kong/CHANGELOG.md index 5a1e4bbec..47ff4b7db 100644 --- a/charts/kong/kong/CHANGELOG.md +++ b/charts/kong/kong/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 2.31.0 + +### Improvements + +* Added controller's RBAC rules for `KongUpstreamPolicy` CRD. + [#917](https://github.com/Kong/charts/pull/917) +* Added services resource to admission webhook config for KIC >= 3.0.0. + [#919](https://github.com/Kong/charts/pull/919) +* Update default ingress controller version to v3.0 + [#929](https://github.com/Kong/charts/pull/929) + [#930](https://github.com/Kong/charts/pull/930) + +### Fixed + +* The target port for cmetrics should only be applied if the ingress controller is enabled. + [#926](https://github.com/Kong/charts/pull/926) +* Fix RBAC for Gateway API v1. + [#928](https://github.com/Kong/charts/pull/928) +* Enable Admission webhook for Gateway API v1 resources. + [#928](https://github.com/Kong/charts/pull/928) + ## 2.30.0 ### Improvements @@ -16,7 +37,7 @@ world-accessible and runtime-created files are created in temporary directories created for the run as user. [#911](https://github.com/Kong/charts/pull/911) -* Allow using templates (via `tpl`) when specifying `controller.proxy.nameOverride`. +* Allow using templates (via `tpl`) when specifying `proxy.nameOverride`. [#914](https://github.com/Kong/charts/pull/914) ## 2.29.0 diff --git a/charts/kong/kong/Chart.yaml b/charts/kong/kong/Chart.yaml index bb97a8046..41842d221 100644 --- a/charts/kong/kong/Chart.yaml +++ b/charts/kong/kong/Chart.yaml @@ -18,4 +18,4 @@ maintainers: name: kong sources: - https://github.com/Kong/charts/tree/main/charts/kong -version: 2.30.0 +version: 2.31.0 diff --git a/charts/kong/kong/README.md b/charts/kong/kong/README.md index efdedf6a3..5840c0a96 100644 --- a/charts/kong/kong/README.md +++ b/charts/kong/kong/README.md @@ -581,7 +581,11 @@ namespaces. Limiting access requires several changes to configuration: Setting `deployment.daemonset: true` deploys Kong using a [DaemonSet controller](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) instead of a Deployment controller. This runs a Kong Pod on every kubelet in -the Kubernetes cluster. +the Kubernetes cluster. For such configuration it may be desirable to configure +Pods to use the network of the host they run on instead of a dedicated network +namespace. The benefit of this approach is that the Kong can bind ports directly +to Kubernetes nodes' network interfaces, without the extra network translation +imposed by NodePort Services. It can be achieved by setting `deployment.hostNetwork: true`. 
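A minimal values sketch for the DaemonSet plus host-network setup described above; only the two keys named in the README are set, everything else is left at chart defaults.

```yaml
# Run one Kong Pod per kubelet and bind ports on the nodes' own network interfaces.
deployment:
  daemonset: true      # use a DaemonSet controller instead of a Deployment
  hostNetwork: true    # skip NodePort translation by using the host network namespace
```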
### Using dnsPolicy and dnsConfig @@ -725,7 +729,7 @@ section of `values.yaml` file: |--------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------| | enabled | Deploy the ingress controller, rbac and crd | true | | image.repository | Docker image with the ingress controller | kong/kubernetes-ingress-controller | -| image.tag | Version of the ingress controller | `2.12` | +| image.tag | Version of the ingress controller | `3.0` | | image.effectiveSemver | Version of the ingress controller used for version-specific features when image.tag is not a valid semantic version | | | readinessProbe | Kong ingress controllers readiness probe | | | livenessProbe | Kong ingress controllers liveness probe | | @@ -791,6 +795,12 @@ Kong Ingress Controller v2.9 has introduced gateway discovery which allows the controller to discover Gateway instances that it should configure using an Admin API Kubernetes service. +Using this feature requires a split release installation of Gateways and Ingress Controller. +For exemplar `values.yaml` files which use this feature please see: [examples README.md](./example-values/README.md). +or use the [`ingress` chart](../ingress/README.md) which can handle this for you. + +##### Configuration + You'll be able to configure this feature through configuration section under `ingressController.gatewayDiscovery`: @@ -813,12 +823,17 @@ You'll be able to configure this feature through configuration section under the chart will generate values for `name` and `namespace` based on the current release name and namespace. This is useful when consuming the `kong` chart as a subchart. -Using this feature requires a split release installation of Gateways and Ingress Controller. -For exemplar `values.yaml` files which use this feature please see: [examples README.md](./example-values/README.md). +Additionally, you can control the addresses that are generated for your Gateways +via the `--gateway-discovery-dns-strategy` CLI flag that can be set on the Ingress Controller +(or an equivalent environment variable: `CONTROLLER_GATEWAY_DISCOVERY_DNS_STRATEGY`). +It accepts 3 values which change the way that Gateway addresses are generated: +- `service` - for service scoped pod DNS names: `pod-ip-address.service-name.my-namespace.svc.cluster-domain.example` +- `pod` - for namespace scope pod DNS names: `pod-ip-address.my-namespace.pod.cluster-domain.example` +- `ip` (default, retains behavior introduced in v2.9) - for regular IP addresses When using `gatewayDiscovery`, you should consider configuring the Admin service to use mTLS client verification to make -this interface secure. Without that, anyone who can access the Admin API from inside the cluster can configure the Gateway -instances. +this interface secure. +Without that, anyone who can access the Admin API from inside the cluster can configure the Gateway instances. On the controller release side, that can be achieved by setting `ingressController.adminApi.tls.client.enabled` to `true`. 
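A hedged sketch of a controller-release values fragment tying the pieces above together. The `gatewayDiscovery.adminApiService` sub-keys and the `ingressController.env` name mangling (keys uppercased and prefixed with `CONTROLLER_`) are assumptions based on the README description, not shown verbatim in this diff.

```yaml
# Controller-only release values for gateway discovery (split Gateway/controller install).
ingressController:
  enabled: true
  gatewayDiscovery:
    enabled: true
    adminApiService:              # assumed sub-keys; see the gatewayDiscovery section above
      name: gateways-kong-admin   # illustrative name of the Gateways' Admin API Service
      namespace: kong             # illustrative namespace of the Gateway release
  adminApi:
    tls:
      client:
        enabled: true             # mTLS client verification, as recommended above
  env:
    # assumed to render as CONTROLLER_GATEWAY_DISCOVERY_DNS_STRATEGY; accepts service | pod | ip
    gateway_discovery_dns_strategy: pod
```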
By default, Helm will generate a certificate Secret named `-admin-api-keypair` and diff --git a/charts/kong/kong/ci/single-image-default-values.yaml b/charts/kong/kong/ci/single-image-default-values.yaml index f9183beb0..0402fe168 100644 --- a/charts/kong/kong/ci/single-image-default-values.yaml +++ b/charts/kong/kong/ci/single-image-default-values.yaml @@ -2,7 +2,7 @@ # use single image strings instead of repository/tag image: - unifiedRepoTag: kong:3.4 + unifiedRepoTag: kong:3.4.1 env: anonymous_reports: "off" @@ -10,4 +10,4 @@ ingressController: env: anonymous_reports: "false" image: - unifiedRepoTag: kong/kubernetes-ingress-controller:2.12 + unifiedRepoTag: kong/kubernetes-ingress-controller:3.0 diff --git a/charts/kong/kong/crds/custom-resource-definitions.yaml b/charts/kong/kong/crds/custom-resource-definitions.yaml index 03353de46..99b3a2c41 100644 --- a/charts/kong/kong/crds/custom-resource-definitions.yaml +++ b/charts/kong/kong/crds/custom-resource-definitions.yaml @@ -1,4 +1,4 @@ -# generated using: kubectl kustomize 'github.com/kong/kubernetes-ingress-controller/config/crd?ref=v2.12.0' +# generated using: kubectl kustomize 'github.com/kong/kubernetes-ingress-controller/config/crd?ref=v3.0.0' apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -773,7 +773,9 @@ spec: `Services` can be a target, OR `Endpoints` can be targets). properties: algorithm: - description: Algorithm is the load balancing algorithm to use. + description: 'Algorithm is the load balancing algorithm to use. Accepted + values are: "round-robin", "consistent-hashing", "least-connections", + "latency".' enum: - round-robin - consistent-hashing @@ -945,6 +947,13 @@ spec: type: integer type: object type: object + x-kubernetes-validations: + - message: '''proxy'' field is no longer supported, use Service''s annotations + instead' + rule: '!has(self.proxy)' + - message: '''route'' field is no longer supported, use Ingress'' annotations + instead' + rule: '!has(self.route)' served: true storage: true subresources: @@ -1198,6 +1207,387 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + labels: + gateway.networking.k8s.io/policy: direct + name: kongupstreampolicies.configuration.konghq.com +spec: + group: configuration.konghq.com + names: + categories: + - kong-ingress-controller + kind: KongUpstreamPolicy + listKind: KongUpstreamPolicyList + plural: kongupstreampolicies + shortNames: + - kup + singular: kongupstreampolicy + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: "KongUpstreamPolicy allows configuring algorithm that should + be used for load balancing traffic between Kong Upstream's Targets. It also + allows configuring health checks for Kong Upstream's Targets. \n Its configuration + is similar to Kong Upstream object (https://docs.konghq.com/gateway/latest/admin-api/#upstream-object), + and it is applied to Kong Upstream objects created by the controller. \n + It can be attached to Services. To attach it to a Service, it has to be + annotated with `konghq.com/upstream-policy: `, where `` is the + name of the KongUpstreamPolicy object in the same namespace as the Service. + \n When attached to a Service, it will affect all Kong Upstreams created + for the Service. \n When attached to a Service used in a Gateway API *Route + rule with multiple BackendRefs, all of its Services MUST be configured with + the same KongUpstreamPolicy. 
Otherwise, the controller will *ignore* the + KongUpstreamPolicy. \n Note: KongUpstreamPolicy doesn't implement Gateway + API's GEP-713 strictly. In particular, it doesn't use the TargetRef for + attaching to Services and Gateway API *Routes - annotations are used instead. + This is to allow reusing the same KongUpstreamPolicy for multiple Services + and Gateway API *Routes." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec contains the configuration of the Kong upstream. + properties: + algorithm: + description: 'Algorithm is the load balancing algorithm to use. Accepted + values are: "round-robin", "consistent-hashing", "least-connections", + "latency".' + enum: + - round-robin + - consistent-hashing + - least-connections + - latency + type: string + hashOn: + description: HashOn defines how to calculate hash for consistent-hashing + load balancing algorithm. Algorithm must be set to "consistent-hashing" + for this field to have effect. + properties: + cookie: + description: Cookie is the name of the cookie to use as hash input. + type: string + cookiePath: + description: CookiePath is cookie path to set in the response + headers. + type: string + header: + description: Header is the name of the header to use as hash input. + type: string + input: + description: Input allows using one of the predefined inputs (ip, + consumer, path). For other parametrized inputs, use one of the + fields below. + enum: + - ip + - consumer + - path + type: string + queryArg: + description: QueryArg is the name of the query argument to use + as hash input. + type: string + uriCapture: + description: URICapture is the name of the URI capture group to + use as hash input. + type: string + type: object + hashOnFallback: + description: HashOnFallback defines how to calculate hash for consistent-hashing + load balancing algorithm if the primary hash function fails. Algorithm + must be set to "consistent-hashing" for this field to have effect. + properties: + cookie: + description: Cookie is the name of the cookie to use as hash input. + type: string + cookiePath: + description: CookiePath is cookie path to set in the response + headers. + type: string + header: + description: Header is the name of the header to use as hash input. + type: string + input: + description: Input allows using one of the predefined inputs (ip, + consumer, path). For other parametrized inputs, use one of the + fields below. + enum: + - ip + - consumer + - path + type: string + queryArg: + description: QueryArg is the name of the query argument to use + as hash input. + type: string + uriCapture: + description: URICapture is the name of the URI capture group to + use as hash input. + type: string + type: object + healthchecks: + description: Healthchecks defines the health check configurations + in Kong. 
+ properties: + active: + description: Active configures active health check probing. + properties: + concurrency: + description: Concurrency is the number of targets to check + concurrently. + minimum: 1 + type: integer + headers: + additionalProperties: + items: + type: string + type: array + description: Headers is a list of HTTP headers to add to the + probe request. + type: object + healthy: + description: Healthy configures thresholds and HTTP status + codes to mark targets healthy for an upstream. + properties: + httpStatuses: + description: HTTPStatuses is a list of HTTP status codes + that Kong considers a success. + items: + description: HTTPStatus is an HTTP status code. + maximum: 599 + minimum: 100 + type: integer + type: array + interval: + description: Interval is the interval between active health + checks for an upstream in seconds when in a healthy + state. + minimum: 0 + type: integer + successes: + description: Successes is the number of successes to consider + a target healthy. + minimum: 0 + type: integer + type: object + httpPath: + description: HTTPPath is the path to use in GET HTTP request + to run as a probe. + pattern: ^/.*$ + type: string + httpsSni: + description: HTTPSSNI is the SNI to use in GET HTTPS request + to run as a probe. + type: string + httpsVerifyCertificate: + description: HTTPSVerifyCertificate is a boolean value that + indicates if the certificate should be verified. + type: boolean + timeout: + description: Timeout is the probe timeout in seconds. + minimum: 0 + type: integer + type: + description: Type determines whether to perform active health + checks using HTTP or HTTPS, or just attempt a TCP connection. + Accepted values are "http", "https", "tcp", "grpc", "grpcs". + enum: + - http + - https + - tcp + - grpc + - grpcs + type: string + unhealthy: + description: Unhealthy configures thresholds and HTTP status + codes to mark targets unhealthy for an upstream. + properties: + httpFailures: + description: HTTPFailures is the number of failures to + consider a target unhealthy. + minimum: 0 + type: integer + httpStatuses: + description: HTTPStatuses is a list of HTTP status codes + that Kong considers a failure. + items: + description: HTTPStatus is an HTTP status code. + maximum: 599 + minimum: 100 + type: integer + type: array + interval: + description: Interval is the interval between active health + checks for an upstream in seconds when in an unhealthy + state. + minimum: 0 + type: integer + tcpFailures: + description: TCPFailures is the number of TCP failures + in a row to consider a target unhealthy. + minimum: 0 + type: integer + timeouts: + description: Timeouts is the number of timeouts in a row + to consider a target unhealthy. + minimum: 0 + type: integer + type: object + type: object + passive: + description: Passive configures passive health check probing. + properties: + healthy: + description: Healthy configures thresholds and HTTP status + codes to mark targets healthy for an upstream. + properties: + httpStatuses: + description: HTTPStatuses is a list of HTTP status codes + that Kong considers a success. + items: + description: HTTPStatus is an HTTP status code. + maximum: 599 + minimum: 100 + type: integer + type: array + interval: + description: Interval is the interval between active health + checks for an upstream in seconds when in a healthy + state. + minimum: 0 + type: integer + successes: + description: Successes is the number of successes to consider + a target healthy. 
+ minimum: 0 + type: integer + type: object + type: + description: Type determines whether to perform passive health + checks interpreting HTTP/HTTPS statuses, or just check for + TCP connection success. Accepted values are "http", "https", + "tcp", "grpc", "grpcs". + enum: + - http + - https + - tcp + - grpc + - grpcs + type: string + unhealthy: + description: Unhealthy configures thresholds and HTTP status + codes to mark targets unhealthy. + properties: + httpFailures: + description: HTTPFailures is the number of failures to + consider a target unhealthy. + minimum: 0 + type: integer + httpStatuses: + description: HTTPStatuses is a list of HTTP status codes + that Kong considers a failure. + items: + description: HTTPStatus is an HTTP status code. + maximum: 599 + minimum: 100 + type: integer + type: array + interval: + description: Interval is the interval between active health + checks for an upstream in seconds when in an unhealthy + state. + minimum: 0 + type: integer + tcpFailures: + description: TCPFailures is the number of TCP failures + in a row to consider a target unhealthy. + minimum: 0 + type: integer + timeouts: + description: Timeouts is the number of timeouts in a row + to consider a target unhealthy. + minimum: 0 + type: integer + type: object + type: object + threshold: + description: Threshold is the minimum percentage of the upstream’s + targets’ weight that must be available for the whole upstream + to be considered healthy. + type: integer + type: object + slots: + description: Slots is the number of slots in the load balancer algorithm. + If not set, the default value in Kong for the algorithm is used. + maximum: 65536 + minimum: 10 + type: integer + type: object + type: object + x-kubernetes-validations: + - message: Only one of spec.hashOn.(input|cookie|header|uriCapture|queryArg) + can be set. + rule: 'has(self.spec.hashOn) ? [has(self.spec.hashOn.input), has(self.spec.hashOn.cookie), + has(self.spec.hashOn.header), has(self.spec.hashOn.uriCapture), has(self.spec.hashOn.queryArg)].filter(fieldSet, + fieldSet == true).size() <= 1 : true' + - message: When spec.hashOn.cookie is set, spec.hashOn.cookiePath is required. + rule: 'has(self.spec.hashOn) && has(self.spec.hashOn.cookie) ? has(self.spec.hashOn.cookiePath) + : true' + - message: When spec.hashOn.cookiePath is set, spec.hashOn.cookie is required. + rule: 'has(self.spec.hashOn) && has(self.spec.hashOn.cookiePath) ? has(self.spec.hashOn.cookie) + : true' + - message: spec.algorithm must be set to "consistent-hashing" when spec.hashOn + is set. + rule: 'has(self.spec.hashOn) ? has(self.spec.algorithm) && self.spec.algorithm + == "consistent-hashing" : true' + - message: Only one of spec.hashOnFallback.(input|header|uriCapture|queryArg) + can be set. + rule: 'has(self.spec.hashOnFallback) ? [has(self.spec.hashOnFallback.input), + has(self.spec.hashOnFallback.header), has(self.spec.hashOnFallback.uriCapture), + has(self.spec.hashOnFallback.queryArg)].filter(fieldSet, fieldSet == true).size() + <= 1 : true' + - message: spec.algorithm must be set to "consistent-hashing" when spec.hashOnFallback + is set. + rule: 'has(self.spec.hashOnFallback) ? has(self.spec.algorithm) && self.spec.algorithm + == "consistent-hashing" : true' + - message: spec.hashOnFallback.cookie must not be set. + rule: 'has(self.spec.hashOnFallback) ? !has(self.spec.hashOnFallback.cookie) + : true' + - message: spec.hashOnFallback.cookiePath must not be set. + rule: 'has(self.spec.hashOnFallback) ? 
!has(self.spec.hashOnFallback.cookiePath) + : true' + - message: spec.healthchecks.passive.healthy.interval must not be set. + rule: 'has(self.spec.healthchecks) && has(self.spec.healthchecks.passive) + && has(self.spec.healthchecks.passive.healthy) ? !has(self.spec.healthchecks.passive.healthy.interval) + : true' + - message: spec.healthchecks.passive.unhealthy.interval must not be set. + rule: 'has(self.spec.healthchecks) && has(self.spec.healthchecks.passive) + && has(self.spec.healthchecks.passive.unhealthy) ? !has(self.spec.healthchecks.passive.unhealthy.interval) + : true' + - message: spec.hashOnFallback must not be set when spec.hashOn.cookie is + set. + rule: 'has(self.spec.hashOn) && has(self.spec.hashOn.cookie) ? !has(self.spec.hashOnFallback) + : true' + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.13.0 diff --git a/charts/kong/kong/templates/_helpers.tpl b/charts/kong/kong/templates/_helpers.tpl index 8736b4994..bd2f83d5f 100644 --- a/charts/kong/kong/templates/_helpers.tpl +++ b/charts/kong/kong/templates/_helpers.tpl @@ -1261,6 +1261,24 @@ role sets used in the charts. Updating these requires separating out cluster resource roles into their separate templates. */}} {{- define "kong.kubernetesRBACRules" -}} +{{- if (semverCompare ">= 3.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }} +- apiGroups: + - configuration.konghq.com + resources: + - kongupstreampolicies + verbs: + - get + - list + - watch +- apiGroups: + - configuration.konghq.com + resources: + - kongupstreampolicies/status + verbs: + - get + - patch + - update +{{- end }} {{- if (semverCompare ">= 2.11.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }} - apiGroups: - configuration.konghq.com @@ -1437,7 +1455,7 @@ resource roles into their separate templates. - get - patch - update -{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") }} +{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1")}} - apiGroups: - gateway.networking.k8s.io resources: @@ -1620,7 +1638,7 @@ Kubernetes Cluster-scoped resources it uses to build Kong configuration. 
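To make the new CRD concrete, here is a minimal KongUpstreamPolicy manifest that satisfies the schema and CEL rules above (one `hashOn` field set, `algorithm` forced to `consistent-hashing`), together with the Service annotation the CRD description says is used for attachment. Names and namespace are illustrative.

```yaml
apiVersion: configuration.konghq.com/v1beta1
kind: KongUpstreamPolicy
metadata:
  name: example-upstream-policy
  namespace: default
spec:
  algorithm: consistent-hashing   # required by the CEL rule whenever hashOn is set
  hashOn:
    header: x-session-id          # only one of input|cookie|header|uriCapture|queryArg may be set
  slots: 10000                    # optional; must be between 10 and 65536
---
apiVersion: v1
kind: Service
metadata:
  name: example-service
  namespace: default
  annotations:
    konghq.com/upstream-policy: example-upstream-policy   # attaches the policy to this Service
spec:
  selector:
    app: example
  ports:
    - port: 80
      targetPort: 8080
```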
- list - watch {{- end }} -{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") }} +{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1")}} - apiGroups: - gateway.networking.k8s.io resources: diff --git a/charts/kong/kong/templates/admission-webhook.yaml b/charts/kong/kong/templates/admission-webhook.yaml index bb7cb3f42..1be937fbb 100644 --- a/charts/kong/kong/templates/admission-webhook.yaml +++ b/charts/kong/kong/templates/admission-webhook.yaml @@ -86,6 +86,9 @@ webhooks: - UPDATE resources: - secrets +{{- if (semverCompare ">= 3.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }} + - services +{{- end }} {{- if (semverCompare ">= 2.12.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }} - apiGroups: - networking.k8s.io @@ -101,6 +104,7 @@ webhooks: apiVersions: - 'v1alpha2' - 'v1beta1' + - 'v1' operations: - CREATE - UPDATE diff --git a/charts/kong/kong/templates/servicemonitor.yaml b/charts/kong/kong/templates/servicemonitor.yaml index b0f8b4d3a..db3dfbf35 100644 --- a/charts/kong/kong/templates/servicemonitor.yaml +++ b/charts/kong/kong/templates/servicemonitor.yaml @@ -24,7 +24,7 @@ spec: {{- if .Values.serviceMonitor.metricRelabelings }} metricRelabelings: {{ toYaml .Values.serviceMonitor.metricRelabelings | nindent 6 }} {{- end }} - {{ if (semverCompare ">= 2.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) -}} + {{- if and .Values.ingressController.enabled (semverCompare ">= 2.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }} - targetPort: cmetrics scheme: http {{- if .Values.serviceMonitor.interval }} diff --git a/charts/kong/kong/values.yaml b/charts/kong/kong/values.yaml index 115c73b2a..20bf519d0 100644 --- a/charts/kong/kong/values.yaml +++ b/charts/kong/kong/values.yaml @@ -510,13 +510,13 @@ dblessConfig: # ----------------------------------------------------------------------------- # Kong Ingress Controller's primary purpose is to satisfy Ingress resources -# created in k8s. It uses CRDs for more fine grained control over routing and +# created in k8s. It uses CRDs for more fine grained control over routing and # for Kong specific configuration. ingressController: enabled: true image: repository: kong/kubernetes-ingress-controller - tag: "2.12" + tag: "3.0" # Optionally set a semantic version for version-gated features. This can normally # be left unset. You only need to set this if your tag is not a semver string, # such as when you are using a "next" tag. Set this to the effective semantic diff --git a/charts/kubecost/cost-analyzer/Chart.yaml b/charts/kubecost/cost-analyzer/Chart.yaml index 83de92f93..8fcdffb9f 100644 --- a/charts/kubecost/cost-analyzer/Chart.yaml +++ b/charts/kubecost/cost-analyzer/Chart.yaml @@ -7,7 +7,7 @@ annotations: catalog.cattle.io/featured: "1" catalog.cattle.io/release-name: cost-analyzer apiVersion: v2 -appVersion: 1.106.4 +appVersion: 1.107.0 dependencies: - condition: global.grafana.enabled name: grafana @@ -25,4 +25,4 @@ description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to moni cloud costs. 
icon: https://partner-charts.rancher.io/assets/logos/kubecost.png name: cost-analyzer -version: 1.106.4 +version: 1.107.0 diff --git a/charts/kubecost/cost-analyzer/README.md b/charts/kubecost/cost-analyzer/README.md index cc07efaf7..3121c8eab 100644 --- a/charts/kubecost/cost-analyzer/README.md +++ b/charts/kubecost/cost-analyzer/README.md @@ -1,12 +1,36 @@ -# Kubecost helm chart -Helm chart for the Kubecost project, which is created to monitor and manage Kubernetes resource spend. Please contact team@kubecost.com or visit [kubecost.com](http://kubecost.com) for more info. +# Kubecost Helm chart -While Helm is the [recommended install path](http://kubecost.com/install), these resources can also be deployed with the following command: +This is the official Helm chart for [Kubecost](https://www.kubecost.com/), an enterprise-grade application to monitor and manage Kubernetes spend. Please see the [website](https://www.kubecost.com/) for more details on what Kubecost can do for you and the official documentation [here](https://docs.kubecost.com/), or contact [team@kubecost.com](mailto:team@kubecost.com) for assistance. -`kubectl apply -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/master/kubecost.yaml --namespace kubecost` +To install via Helm, run the following command. -
-The following table lists the commonly used configurable parameters of the Kubecost Helm chart and their default values. +```sh +helm upgrade --install kubecost -n kubecost --create-namespace \ + --repo https://kubecost.github.io/cost-analyzer/ cost-analyzer \ + --set kubecostToken="aGVsbUBrdWJlY29zdC5jb20=xm343yadf98" +``` + +Alternatively, add the Helm repository first and scan for updates. + +```sh +helm repo add kubecost https://kubecost.github.io/cost-analyzer/ +helm repo update +``` + +Next, install the chart. + +```sh +helm install kubecost kubecost/cost-analyzer -n kubecost --create-namespace \ + --set kubecostToken="aGVsbUBrdWJlY29zdC5jb20=xm343yadf98" +``` + +While Helm is the [recommended install path](http://kubecost.com/install) for Kubecost especially in production, Kubecost can alternatively be deployed with a single-file manifest using the following command. Keep in mind when choosing this method, Kubecost will be installed from a development branch and may include unreleased changes. + +```sh +kubectl apply -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/develop/kubecost.yaml +``` + +The following table lists commonly used configuration parameters for the Kubecost Helm chart and their default values. Please see the [values file](values.yaml) for the complete set of definable values. | Parameter | Description | Default | |------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------| @@ -55,16 +79,23 @@ The following table lists the commonly used configurable parameters of the Kubec | `clusterController.fqdn` | Customize the upstream cluster controller FQDN | `computed in terms of the service name and namespace` | | `global.grafana.fqdn` | Customize the upstream grafana FQDN | `computed in terms of the release name and namespace` | -## Testing -To perform local testing do next: -- install locally [kind](https://github.com/kubernetes-sigs/kind) according to documentation. -- install locally [ct](https://github.com/helm/chart-testing) according to documentation. -- create local cluster using `kind` \ -use image version from [kind docker registry](https://hub.docker.com/r/kindest/node/tags?page=1) -```shell -kind create cluster --image kindest/node: -``` -- perform ct execution -```shell -ct install --chart-dirs="." --charts="." --helm-repo-extra-args="--set=global.prometheus.enabled=false --set=global.grafana.enabled=false" +## Adjusting Log Output + +The log output can be customized during deployment by using the `LOG_LEVEL` and/or `LOG_FORMAT` environment variables. + +### Adjusting Log Level + +Adjusting the log level increases or decreases the level of verbosity written to the logs. To set the log level to `trace`, the following flag can be added to the `helm` command. + +```sh +--set 'kubecostModel.extraEnv[0].name=LOG_LEVEL,kubecostModel.extraEnv[0].value=trace' ``` + +### Adjusting Log Format + +Adjusting the log format changes the format in which the logs are output making it easier for log aggregators to parse and display logged messages. The `LOG_FORMAT` environment variable accepts the values `JSON`, for a structured output, and `pretty` for a nice, human-readable output. 
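The same settings can be kept in a values file instead of `--set` flags; a sketch assuming `kubecostModel.extraEnv` takes standard Kubernetes-style `name`/`value` entries, which is what the flag syntax above implies.

```yaml
# values.yaml equivalent of the --set example above.
kubecostModel:
  extraEnv:
    - name: LOG_LEVEL
      value: trace     # e.g. trace for maximum verbosity
    - name: LOG_FORMAT
      value: JSON      # JSON for structured output, pretty for human-readable output
```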
+ +| Value | Output | +|--------|----------------------------------------------------------------------------------------------------------------------------| +| `JSON` | `{"level":"info","time":"2006-01-02T15:04:05.999999999Z07:00","message":"Starting cost-model (git commit \"1.91.0-rc.0\")"}` | +| `pretty` | `2006-01-02T15:04:05.999999999Z07:00 INF Starting cost-model (git commit "1.91.0-rc.0")` | diff --git a/charts/kubecost/cost-analyzer/attached-disks.json b/charts/kubecost/cost-analyzer/attached-disks.json index 0badbedd3..717a951f4 100644 --- a/charts/kubecost/cost-analyzer/attached-disks.json +++ b/charts/kubecost/cost-analyzer/attached-disks.json @@ -112,7 +112,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(container_fs_limit_bytes{instance=~'$disk', device!=\"tmpfs\", id=\"/\", cluster_id=~'$cluster'}) by (cluster_id, instance)", + "expr": "max(container_fs_limit_bytes{instance=~'$disk', device!=\"tmpfs\", id=\"/\", cluster_id=~'$cluster'}) by (cluster_id, instance)", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -209,7 +209,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(container_fs_usage_bytes{instance=~'$disk',id=\"/\", cluster_id=~'$cluster'}) by (cluster_id, instance) / sum(container_fs_limit_bytes{instance=~'$disk',device!=\"tmpfs\", id=\"/\", cluster_id=~'$cluster'}) by (cluster_id,instance)", + "expr": "sum(container_fs_usage_bytes{instance=~'$disk',id=\"/\", cluster_id=~'$cluster'}) by (cluster_id, instance) / max(container_fs_limit_bytes{instance=~'$disk',device!=\"tmpfs\", id=\"/\", cluster_id=~'$cluster'}) by (cluster_id,instance)", "format": "time_series", "interval": "", "intervalFactor": 1, diff --git a/charts/kubecost/cost-analyzer/charts/grafana/templates/deployment.yaml b/charts/kubecost/cost-analyzer/charts/grafana/templates/deployment.yaml index 403733820..de83b48a0 100644 --- a/charts/kubecost/cost-analyzer/charts/grafana/templates/deployment.yaml +++ b/charts/kubecost/cost-analyzer/charts/grafana/templates/deployment.yaml @@ -41,9 +41,12 @@ spec: {{- if .Values.schedulerName }} schedulerName: "{{ .Values.schedulerName }}" {{- end }} -{{- if .Values.securityContext }} +{{- if .Values.global.securityContext }} securityContext: -{{ toYaml .Values.securityContext | indent 8 }} +{{- toYaml .Values.global.securityContext | nindent 8 }} +{{- else if .Values.securityContext }} + securityContext: +{{- toYaml .Values.securityContext | nindent 8 }} {{- end }} {{- if .Values.priorityClassName }} priorityClassName: "{{ .Values.priorityClassName }}" @@ -54,6 +57,10 @@ spec: image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} command: ["sh", "/etc/grafana/download_dashboards.sh"] + {{- with .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} volumeMounts: - name: config mountPath: "/etc/grafana/download_dashboards.sh" @@ -78,6 +85,10 @@ spec: - name: {{ template "grafana.name" . }}-sc-dashboard image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" imagePullPolicy: {{ .Values.sidecar.image.pullPolicy }} + {{- if .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml .Values.global.containerSecurityContext | nindent 12 -}} + {{- end }} env: - name: LABEL value: "{{ .Values.sidecar.dashboards.label }}" @@ -95,6 +106,10 @@ spec: - name: {{ template "grafana.name" . 
}}-sc-datasources image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" imagePullPolicy: {{ .Values.sidecar.image.pullPolicy }} + {{- with .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} env: - name: LABEL value: "{{ .Values.sidecar.datasources.label }}" @@ -111,6 +126,10 @@ spec: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} volumeMounts: - name: config mountPath: "/etc/grafana/grafana.ini" diff --git a/charts/kubecost/cost-analyzer/charts/grafana/values.yaml b/charts/kubecost/cost-analyzer/charts/grafana/values.yaml index ffb2d8349..3375a8404 100644 --- a/charts/kubecost/cost-analyzer/charts/grafana/values.yaml +++ b/charts/kubecost/cost-analyzer/charts/grafana/values.yaml @@ -258,7 +258,7 @@ smtp: sidecar: image: repository: kiwigrid/k8s-sidecar - tag: 1.25.0 + tag: 1.25.1 pullPolicy: IfNotPresent resources: # limits: diff --git a/charts/kubecost/cost-analyzer/charts/prometheus/README.md b/charts/kubecost/cost-analyzer/charts/prometheus/README.md index 0f636b204..bb8fded41 100644 --- a/charts/kubecost/cost-analyzer/charts/prometheus/README.md +++ b/charts/kubecost/cost-analyzer/charts/prometheus/README.md @@ -183,8 +183,8 @@ Parameter | Description | Default `configmapReload.prometheus.enabled` | If false, the configmap-reload container for Prometheus will not be deployed | `true` `configmapReload.prometheus.containerSecurityContext` | securityContext for container | `{}` `configmapReload.prometheus.name` | configmap-reload container name | `configmap-reload` -`configmapReload.prometheus.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload` -`configmapReload.prometheus.image.tag` | configmap-reload container image tag | `v0.5.0` +`configmapReload.prometheus.image.repository` | configmap-reload container image repository | `quay.io/prometheus-operator/prometheus-config-reloader` +`configmapReload.prometheus.image.tag` | configmap-reload container image tag | `v0.68.0` `configmapReload.prometheus.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent` `configmapReload.prometheus.extraArgs` | Additional configmap-reload container arguments | `{}` `configmapReload.prometheus.extraVolumeDirs` | Additional configmap-reload volume directories | `{}` @@ -192,8 +192,8 @@ Parameter | Description | Default `configmapReload.prometheus.resources` | configmap-reload pod resource requests & limits | `{}` `configmapReload.alertmanager.enabled` | If false, the configmap-reload container for AlertManager will not be deployed | `true` `configmapReload.alertmanager.name` | configmap-reload container name | `configmap-reload` -`configmapReload.alertmanager.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload` -`configmapReload.alertmanager.image.tag` | configmap-reload container image tag | `v0.5.0` +`configmapReload.alertmanager.image.repository` | configmap-reload container image repository | `quay.io/prometheus-operator/prometheus-config-reloader` +`configmapReload.alertmanager.image.tag` | configmap-reload container image tag | `v0.68.0` `configmapReload.alertmanager.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent` `configmapReload.alertmanager.extraArgs` | Additional 
configmap-reload container arguments | `{}` `configmapReload.alertmanager.extraVolumeDirs` | Additional configmap-reload volume directories | `{}` diff --git a/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-deployment.yaml b/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-deployment.yaml index 6e0f9513d..4924f9136 100644 --- a/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-deployment.yaml +++ b/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-deployment.yaml @@ -50,8 +50,8 @@ spec: image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + - --watched-dir=/etc/config + - --reload-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} - --{{ $key }}={{ $value }} {{- end }} @@ -59,10 +59,15 @@ spec: - --volume-dir={{ . }} {{- end }} resources: -{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} - {{- with .Values.configmapReload.prometheus.containerSecurityContext }} + {{- toYaml .Values.configmapReload.prometheus.resources | nindent 12 }} securityContext: - {{- toYaml . | nindent 12 }} + {{- if .Values.global.containerSecurityContext }} + {{- toYaml .Values.global.containerSecurityContext | nindent 12 }} + {{- else if .Values.global.containerSecurityContext }} + {{- toYaml .Values.global.containerSecurityContext | nindent 12 }} + {{- else }} + securityContext: + {{- toYaml .Values.configmapReload.prometheus.containerSecurityContext | nindent 12 }} {{- end }} volumeMounts: {{- if .Values.selfsignedCertConfigMapName }} @@ -129,10 +134,12 @@ spec: failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} resources: -{{ toYaml .Values.server.resources | indent 12 }} - {{- with .Values.server.containerSecurityContext }} + {{- toYaml .Values.server.resources | nindent 12 }} securityContext: - {{- toYaml . 
| nindent 12 }} + {{- if .Values.global.containerSecurityContext }} + {{- toYaml .Values.global.containerSecurityContext | nindent 12 }} + {{- else }} + {{- toYaml .Values.server.prometheus.containerSecurityContext | nindent 12 }} {{- end }} volumeMounts: - name: config-volume @@ -170,11 +177,14 @@ spec: {{- end }} {{- if .Values.server.nodeSelector }} nodeSelector: -{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- toYaml .Values.server.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.server.securityContext }} + {{- if .Values.global.securityContext }} securityContext: -{{ toYaml .Values.server.securityContext | indent 8 }} + {{- toYaml .Values.global.securityContext | nindent 8 }} + {{- else if .Values.server.securityContext }} + securityContext2: + {{- toYaml .Values.server.securityContext | nindent 8 }} {{- end }} {{- if .Values.server.tolerations }} tolerations: diff --git a/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-statefulset.yaml b/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-statefulset.yaml index 2f25a94ad..d121c2696 100644 --- a/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-statefulset.yaml +++ b/charts/kubecost/cost-analyzer/charts/prometheus/templates/server-statefulset.yaml @@ -51,8 +51,8 @@ spec: image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + - --watched-dir=/etc/config + - --reload-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} - --{{ $key }}={{ $value }} {{- end }} diff --git a/charts/kubecost/cost-analyzer/charts/prometheus/values.yaml b/charts/kubecost/cost-analyzer/charts/prometheus/values.yaml index af4a1b90f..0df64f922 100644 --- a/charts/kubecost/cost-analyzer/charts/prometheus/values.yaml +++ b/charts/kubecost/cost-analyzer/charts/prometheus/values.yaml @@ -307,8 +307,6 @@ alertmanager: type: ClusterIP ## Monitors ConfigMap changes and POSTs to a URL -## Ref: https://github.com/jimmidyson/configmap-reload -## configmapReload: prometheus: ## If false, the configmap-reload container will not be deployed @@ -322,8 +320,8 @@ configmapReload: ## configmap-reload container image ## image: - repository: jimmidyson/configmap-reload - tag: v0.7.1 + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.68.0 pullPolicy: IfNotPresent ## Additional configmap-reload container arguments @@ -362,8 +360,8 @@ configmapReload: ## configmap-reload container image ## image: - repository: jimmidyson/configmap-reload - tag: v0.7.1 + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.68.0 pullPolicy: IfNotPresent ## Additional configmap-reload container arguments @@ -1327,98 +1325,6 @@ serverFiles: regex: 
(container_cpu_allocation|container_cpu_usage_seconds_total|container_fs_limit_bytes|container_fs_writes_bytes_total|container_gpu_allocation|container_memory_allocation_bytes|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|DCGM_FI_DEV_GPU_UTIL|deployment_match_labels|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_ready|kube_deployment_spec_replicas|kube_deployment_status_replicas|kube_deployment_status_replicas_available|kube_job_status_failed|kube_namespace_annotations|kube_namespace_labels|kube_node_info|kube_node_labels|kube_node_status_allocatable|kube_node_status_allocatable_cpu_cores|kube_node_status_allocatable_memory_bytes|kube_node_status_capacity|kube_node_status_capacity_cpu_cores|kube_node_status_capacity_memory_bytes|kube_node_status_condition|kube_persistentvolume_capacity_bytes|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_pod_container_info|kube_pod_container_resource_limits|kube_pod_container_resource_limits_cpu_cores|kube_pod_container_resource_limits_memory_bytes|kube_pod_container_resource_requests|kube_pod_container_resource_requests_cpu_cores|kube_pod_container_resource_requests_memory_bytes|kube_pod_container_status_restarts_total|kube_pod_container_status_running|kube_pod_container_status_terminated_reason|kube_pod_labels|kube_pod_owner|kube_pod_status_phase|kube_replicaset_owner|kube_statefulset_replicas|kube_statefulset_status_replicas|kubecost_cluster_info|kubecost_cluster_management_cost|kubecost_cluster_memory_working_set_bytes|kubecost_load_balancer_cost|kubecost_network_internet_egress_cost|kubecost_network_region_egress_cost|kubecost_network_zone_egress_cost|kubecost_node_is_spot|kubecost_pod_network_egress_bytes_total|node_cpu_hourly_cost|node_cpu_seconds_total|node_disk_reads_completed|node_disk_reads_completed_total|node_disk_writes_completed|node_disk_writes_completed_total|node_filesystem_device_error|node_gpu_count|node_gpu_hourly_cost|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_network_transmit_bytes_total|node_ram_hourly_cost|node_total_hourly_cost|pod_pvc_allocation|pv_hourly_cost|service_selector_labels|statefulSet_match_labels|kubecost_pv_info|up) action: keep - # Scrape config for slow service endpoints; same as above, but with a larger - # timeout and a larger interval - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - - job_name: 'kubernetes-service-endpoints-slow' - - scrape_interval: 5m - scrape_timeout: 30s - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) 
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: kubernetes_node - - - job_name: 'prometheus-pushgateway' - honor_labels: true - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: pushgateway - - # Example scrape config for probing services via the Blackbox Exporter. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/probe`: Only probe services that have a value of `true` - - job_name: 'kubernetes-services' - - metrics_path: /probe - params: - module: [http_2xx] - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: true - - source_labels: [__address__] - target_label: __param_target - - target_label: __address__ - replacement: blackbox - - source_labels: [__param_target] - target_label: instance - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: kubernetes_name - # adds additional scrape configs to prometheus.yml # must be a string so you have to add a | after extraScrapeConfigs: # example adds prometheus-blackbox-exporter scrape config diff --git a/charts/kubecost/cost-analyzer/cluster-utilization.json b/charts/kubecost/cost-analyzer/cluster-utilization.json index 9007618b0..45090d9fc 100644 --- a/charts/kubecost/cost-analyzer/cluster-utilization.json +++ b/charts/kubecost/cost-analyzer/cluster-utilization.json @@ -613,7 +613,7 @@ "pluginVersion": "8.3.2", "targets": [ { - "expr": "SUM(container_memory_usage_bytes{namespace!=\"\"}) / SUM(kube_node_status_allocatable{resource=\"memory\", unit=\"byte\"}) * 100", + "expr": "SUM(container_memory_working_set_bytes{name!=\"POD\", container!=\"\", namespace!=\"\"}) / SUM(kube_node_status_allocatable{resource=\"memory\", unit=\"byte\"}) * 100", "format": "time_series", "interval": "", "intervalFactor": 1, diff --git a/charts/kubecost/cost-analyzer/scripts/create-admission-controller-tls.sh b/charts/kubecost/cost-analyzer/scripts/create-admission-controller-tls.sh index 8f0d1c32b..2290cadd1 100644 --- a/charts/kubecost/cost-analyzer/scripts/create-admission-controller-tls.sh +++ b/charts/kubecost/cost-analyzer/scripts/create-admission-controller-tls.sh @@ -1,24 +1,29 @@ #!/bin/bash -namespace=$1 -if [ "$namespace" == "" ]; then +set -eo pipefail + +if [ -z "$1" ]; then namespace=kubecost +else + namespace="$1" fi -DIRECTORY=$(cd `dirname $0` && pwd) - -echo "Creating certificates" +echo -e "\nCreating certificates ..." 
mkdir certs openssl genrsa -out certs/tls.key 2048 -openssl req -new -key certs/tls.key -out certs/tls.csr -subj "/CN=webhook-server.$namespace.svc" -openssl x509 -req -days 500 -extfile <(printf "subjectAltName=DNS:webhook-server.$namespace.svc") -in certs/tls.csr -signkey certs/tls.key -out certs/tls.crt +openssl req -new -key certs/tls.key -out certs/tls.csr -subj "/CN=webhook-server.${namespace}.svc" +openssl x509 -req -days 500 -extfile <(printf "subjectAltName=DNS:webhook-server.%s.svc" "${namespace}") -in certs/tls.csr -signkey certs/tls.key -out certs/tls.crt -echo "Creating Webhook Server TLS Secret" +echo -e "\nCreating Webhook Server TLS Secret ..." kubectl create secret tls webhook-server-tls \ --cert "certs/tls.crt" \ - --key "certs/tls.key" -n $namespace + --key "certs/tls.key" -n "${namespace}" +ENCODED_CA=$(base64 < certs/tls.crt | tr -d '\n') -echo "Updating values.yaml" -ENCODED_CA=$(cat certs/tls.crt | base64 | tr -d '\n') -sed -i 's@${CA_BUNDLE}@'"$ENCODED_CA"'@g' ../values.yaml +if [ -f "../values.yaml" ]; then + echo -e "\nUpdating values.yaml ..." + sed -i '' 's@${CA_BUNDLE}@'"${ENCODED_CA}"'@g' ../values.yaml +else + echo -e "\nThe CA bundle to use in your values file is: \n${ENCODED_CA}" +fi \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/templates/NOTES.txt b/charts/kubecost/cost-analyzer/templates/NOTES.txt index 3958f66dc..75da274ec 100644 --- a/charts/kubecost/cost-analyzer/templates/NOTES.txt +++ b/charts/kubecost/cost-analyzer/templates/NOTES.txt @@ -1,14 +1,13 @@ -------------------------------------------------- -{{- $node := (lookup "v1" "Node" "" "") }} {{- $isEKS := (regexMatch ".*eks.*" (.Capabilities.KubeVersion | quote) )}} {{- $isGT22 := (semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion) }} {{- $PVNotExists := (empty (lookup "v1" "PersistentVolume" "" "")) }} {{- $EBSCSINotExists := (empty (lookup "apps/v1" "Deployment" "kube-system" "ebs-csi-controller")) }} -{{- $servicePort := .Values.service.port | default 9090 -}} -Kubecost has been successfully installed. +{{- $servicePort := .Values.service.port | default 9090 }} +Kubecost {{ .Chart.Version }} has been successfully installed. {{ if (and $isEKS $isGT22) -}} @@ -28,14 +27,12 @@ ERROR: MISSING EBS-CSI DRIVER WHICH IS REQUIRED ON EKS v1.23+ TO MANAGE PERSISTE Please allow 5-10 minutes for Kubecost to gather metrics. -If you have configured cloud-integrations, it can take up to 48 hours for cost reconciliation to occur. - -When using Durable storage (Enterprise Edition), please allow up to 4 hours for data to be collected and the UI to be healthy. +When configured, cost reconciliation with cloud provider billing data will have a 48 hour delay. When pods are Ready, you can enable port-forwarding with the following command: kubectl port-forward --namespace {{ .Release.Namespace }} deployment/{{ template "cost-analyzer.fullname" . }} {{ $servicePort }} -Next, navigate to http://localhost:{{ $servicePort }} in a web browser. +Then, navigate to http://localhost:{{ $servicePort }} in a web browser. Having installation issues? View our Troubleshooting Guide at http://docs.kubecost.com/troubleshoot-install diff --git a/charts/kubecost/cost-analyzer/templates/_helpers.tpl b/charts/kubecost/cost-analyzer/templates/_helpers.tpl index bd24f670f..a03e62de8 100644 --- a/charts/kubecost/cost-analyzer/templates/_helpers.tpl +++ b/charts/kubecost/cost-analyzer/templates/_helpers.tpl @@ -11,6 +11,15 @@ Expand the name of the chart. 
{{- define "federator.name" -}} {{- default "federator" | trunc 63 | trimSuffix "-" -}} {{- end -}} +{{- define "aggregator.name" -}} +{{- default "aggregator" | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- define "cloudCost.name" -}} +{{- default "cloud-cost" | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- define "etlUtils.name" -}} +{{- default "etl-utils" | trunc 63 | trimSuffix "-" -}} +{{- end -}} {{/* Create a default fully qualified app name. @@ -42,6 +51,18 @@ If release name contains chart name it will be used as a full name. {{- printf "%s-%s" .Release.Name "federator" | trunc 63 | trimSuffix "-" -}} {{- end -}} +{{- define "aggregator.fullname" -}} +{{- printf "%s-%s" .Release.Name "aggregator" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "cloudCost.fullname" -}} +{{- printf "%s-%s" .Release.Name (include "cloudCost.name" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "etlUtils.fullname" -}} +{{- printf "%s-%s" .Release.Name (include "etlUtils.name" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + {{/* Create the fully qualified name for Prometheus server service. */}} @@ -88,6 +109,47 @@ Create the fully qualified name for Prometheus alertmanager service. {{- printf "%s-%s" .Release.Name "query-service-load-balancer" | trunc 63 | trimSuffix "-" -}} {{- end -}} +{{- define "aggregator.serviceName" -}} +{{- printf "%s-%s" .Release.Name "aggregator" | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- define "cloudCost.serviceName" -}} +{{ include "cloudCost.fullname" . }} +{{- end -}} +{{- define "etlUtils.serviceName" -}} +{{ include "etlUtils.fullname" . }} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cost-analyzer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cost-analyzer.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} +{{- define "query-service.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "query-service.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} +{{- define "aggregator.serviceAccountName" -}} +{{- if .Values.kubecostAggregator.serviceAccountName -}} + {{ .Values.kubecostAggregator.serviceAccountName }} +{{- else -}} + {{ template "cost-analyzer.serviceAccountName" . }} +{{- end -}} +{{- end -}} +{{- define "cloudCost.serviceAccountName" -}} +{{- if .Values.kubecostAggregator.cloudCost.serviceAccountName -}} + {{ .Values.kubecostAggregator.cloudCost.serviceAccountName }} +{{- else -}} + {{ template "cost-analyzer.serviceAccountName" . }} +{{- end -}} +{{- end -}} {{/* Network Costs name used to tie autodiscovery of metrics to daemon set pods */}} @@ -109,9 +171,20 @@ Network Costs name used to tie autodiscovery of metrics to daemon set pods {{- end }} {{- end -}} +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cost-analyzer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + {{/* Create the chart labels. */}} +{{- define "cost-analyzer.chartLabels" -}} +helm.sh/chart: {{ include "cost-analyzer.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} {{- define "kubecost.chartLabels" -}} app.kubernetes.io/name: {{ include "cost-analyzer.name" . }} helm.sh/chart: {{ include "cost-analyzer.chart" . 
}} @@ -130,32 +203,13 @@ helm.sh/chart: {{ include "cost-analyzer.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end -}} - - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cost-analyzer.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- define "kubecost.aggregator.chartLabels" -}} +app.kubernetes.io/name: {{ include "aggregator.name" . }} +helm.sh/chart: {{ include "cost-analyzer.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end -}} -{{/* -Create the name of the service account -*/}} -{{- define "cost-analyzer.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "cost-analyzer.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} -{{- define "query-service.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "query-service.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} {{/* Create the common labels. @@ -175,6 +229,18 @@ app: query-service {{ include "kubecost.federator.chartLabels" . }} app: federator {{- end -}} +{{- define "aggregator.commonLabels" -}} +{{ include "cost-analyzer.chartLabels" . }} +app: aggregator +{{- end -}} +{{- define "cloudCost.commonLabels" -}} +{{ include "cost-analyzer.chartLabels" . }} +{{ include "cloudCost.selectorLabels" . }} +{{- end -}} +{{- define "etlUtils.commonLabels" -}} +{{ include "cost-analyzer.chartLabels" . }} +{{ include "etlUtils.selectorLabels" . }} +{{- end -}} {{/* Create the selector labels. @@ -194,6 +260,21 @@ app.kubernetes.io/name: {{ include "federator.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app: federator {{- end -}} +{{- define "aggregator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "aggregator.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app: aggregator +{{- end -}} +{{- define "cloudCost.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cloudCost.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app: {{ include "cloudCost.name" . }} +{{- end -}} +{{- define "etlUtils.selectorLabels" -}} +app.kubernetes.io/name: {{ include "etlUtils.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app: {{ include "etlUtils.name" . }} +{{- end -}} {{/* Return the appropriate apiVersion for daemonset. @@ -241,9 +322,9 @@ Return the appropriate apiVersion for podsecuritypolicy. {{/* Recursive filter which accepts a map containing an input map (.v) and an output map (.r). The template -will traverse all values inside .v recursively writing non-map values to the output .r. If a nested map -is discovered, we look for an 'enabled' key. If it doesn't exist, we continue traversing the -map. If it does exist, we omit the inner map traversal iff enabled is false. This filter writes the +will traverse all values inside .v recursively writing non-map values to the output .r. If a nested map +is discovered, we look for an 'enabled' key. If it doesn't exist, we continue traversing the +map. If it does exist, we omit the inner map traversal iff enabled is false. 
This filter writes the enabled only version to the output .r */}} {{- define "cost-analyzer.filter" -}} @@ -281,8 +362,8 @@ The implied use case is {{ template "cost-analyzer.filterEnabled" .Values }} {{/* This template runs the full check for leader/follower requirements in order to determine -whether it should be configured. This template will return true if it's enabled and all -requirements are met. +whether it should be configured. This template will return true if it's enabled and all +requirements are met. */}} {{- define "cost-analyzer.leaderFollowerEnabled" }} {{- if .Values.kubecostDeployment }} @@ -307,4 +388,4 @@ requirements are met. {{- else }} {{- "" }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-deployment.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-deployment.yaml new file mode 100644 index 000000000..0478f8712 --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-deployment.yaml @@ -0,0 +1,140 @@ +{{- if .Values.kubecostAggregator.cloudCost.enabled }} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cloudCost.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{ include "cloudCost.commonLabels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{ include "cloudCost.selectorLabels" . | nindent 6 }} + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: cloud-cost + app.kubernetes.io/instance: {{ .Release.Name }} + app: cloud-cost + spec: + restartPolicy: Always + serviceAccountName: {{ template "cloudCost.serviceAccountName" . }} + volumes: + {{- if .Values.kubecostModel.etlBucketConfigSecret }} + - name: etl-bucket-config + secret: + defaultMode: 420 + secretName: {{ .Values.kubecostModel.etlBucketConfigSecret }} + {{- end }} + {{- if .Values.kubecostModel.federatedStorageConfigSecret }} + - name: federated-storage-config + secret: + defaultMode: 420 + secretName: {{ .Values.kubecostModel.federatedStorageConfigSecret }} + {{- end }} + {{- if .Values.kubecostProductConfigs.cloudIntegrationSecret }} + - name: cloud-integration + secret: + secretName: {{ .Values.kubecostProductConfigs.cloudIntegrationSecret }} + items: + - key: cloud-integration.json + path: cloud-integration.json + {{- else }} + {{- fail "Cloud Cost requires configuration secret" }} + {{- end }} + containers: + - name: cloud-cost + {{- if .Values.kubecostModel }} + {{- if .Values.kubecostModel.openSourceOnly }} + {{- fail "Kubecost Aggregator cannot be used with open source only" }} + {{- else if .Values.kubecostAggregator.fullImageName }} + image: {{ .Values.kubecostAggregator.fullImageName }} + {{- else if .Values.kubecostModel.fullImageName }} + image: {{ .Values.kubecostModel.fullImageName }} + {{- else if .Values.imageVersion }} + image: {{ .Values.kubecostModel.image }}:{{ .Values.imageVersion }} + {{- else }} + image: {{ .Values.kubecostModel.image }}:prod-{{ $.Chart.AppVersion }} + {{ end }} + {{- else }} + image: gcr.io/kubecost1/cost-model:prod-{{ $.Chart.AppVersion }} + {{ end }} + readinessProbe: + httpGet: + path: /healthz + port: 9005 + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 200 + imagePullPolicy: Always + args: ["cloud-cost"] + ports: + - name: tcp-api + containerPort: 9005 + protocol: TCP + resources: + {{- toYaml .Values.kubecostAggregator.cloudCost.resources | nindent 12 }} + volumeMounts: + {{- if 
.Values.kubecostModel.federatedStorageConfigSecret }} + - name: federated-storage-config + mountPath: /var/configs/etl/federated + readOnly: true + {{- end }} + {{- if .Values.kubecostModel.etlBucketConfigSecret }} + - name: etl-bucket-config + mountPath: /var/configs/etl + readOnly: true + {{- end }} + {{- if .Values.kubecostProductConfigs.cloudIntegrationSecret }} + - name: cloud-integration + mountPath: /var/configs/cloud-integration + {{- end }} + env: + - name: CONFIG_PATH + value: /var/configs/ + {{- if .Values.kubecostModel.etlBucketConfigSecret }} + - name: ETL_BUCKET_CONFIG + value: "/var/configs/etl/object-store.yaml" + {{- end}} + {{- if .Values.kubecostModel.federatedStorageConfigSecret }} + - name: FEDERATED_STORE_CONFIG + value: "/var/configs/etl/federated/federated-store.yaml" + - name: FEDERATED_CLUSTER + value: "true" + {{- end}} + {{- range $key, $value := .Values.kubecostAggregator.cloudCost.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + + + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.kubecostAggregator.priority }} + {{- if .Values.kubecostAggregator.priority.enabled }} + {{- if .Values.kubecostAggregator.priority.name }} + priorityClassName: {{ .Values.kubecostAggregator.priority.name }} + {{- else }} + priorityClassName: {{ template "cost-analyzer.fullname" . }}-aggregator-priority + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.kubecostAggregator.cloudCost.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.kubecostAggregator.cloudCost.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.kubecostAggregator.cloudCost.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service-account.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service-account.yaml new file mode 100644 index 000000000..3cfc37243 --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service-account.yaml @@ -0,0 +1,15 @@ +{{- if .Values.kubecostAggregator.cloudCost.enabled }} +{{- if and .Values.serviceAccount.create .Values.kubecostAggregator.cloudCost.serviceAccountName }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cloudCost.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{ include "cloudCost.commonLabels" . | nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service.yaml new file mode 100644 index 000000000..a0ea7deba --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/aggregator-cloud-cost-service.yaml @@ -0,0 +1,18 @@ +{{- if .Values.kubecostAggregator.cloudCost.enabled }} + +kind: Service +apiVersion: v1 +metadata: + name: {{ template "cloudCost.serviceName" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "cloudCost.commonLabels" . | nindent 4 }} +spec: + selector: +{{ include "cloudCost.selectorLabels" . 
| nindent 4 }} + type: "ClusterIP" + ports: + - name: tcp-api + port: 9005 + targetPort: 9005 +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-service.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-service.yaml new file mode 100644 index 000000000..b2a7063c6 --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/aggregator-service.yaml @@ -0,0 +1,20 @@ +{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostAggregator) }} +{{- if .Values.kubecostAggregator.enabled }} + +kind: Service +apiVersion: v1 +metadata: + name: {{ template "aggregator.serviceName" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "aggregator.commonLabels" . | nindent 4 }} +spec: + selector: +{{ include "aggregator.selectorLabels" . | nindent 4 }} + type: "ClusterIP" + ports: + - name: tcp-api + port: 9004 + targetPort: 9004 +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/aggregator-statefulset.yaml b/charts/kubecost/cost-analyzer/templates/aggregator-statefulset.yaml new file mode 100644 index 000000000..e412fd6d4 --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/aggregator-statefulset.yaml @@ -0,0 +1,203 @@ +{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostAggregator) }} +{{- if .Values.kubecostAggregator.enabled }} + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "aggregator.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "aggregator.commonLabels" . | nindent 4 }} +spec: + replicas: {{ .Values.kubecostAggregator.replicas }} + serviceName: {{ template "aggregator.serviceName" . }} + selector: + matchLabels: + app.kubernetes.io/name: aggregator + app.kubernetes.io/instance: {{ .Release.Name }} + app: aggregator + volumeClaimTemplates: + - metadata: + name: persistent-configs + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.kubecostAggregator.persistentConfigsStorage.storageClass }} + resources: + requests: + storage: {{ .Values.kubecostAggregator.persistentConfigsStorage.storageRequest }} + - metadata: + name: aggregator-storage + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.kubecostAggregator.aggregatorStorage.storageClass }} + resources: + requests: + storage: {{ .Values.kubecostAggregator.aggregatorStorage.storageRequest }} + {{- if .Values.kubecostAggregator.aggregatorDbStorage }} + - metadata: + name: aggregator-db-storage + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.kubecostAggregator.aggregatorDbStorage.storageClass }} + resources: + requests: + storage: {{ .Values.kubecostAggregator.aggregatorDbStorage.storageRequest }} + {{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: aggregator + app.kubernetes.io/instance: {{ .Release.Name }} + app: aggregator + spec: + restartPolicy: Always + {{- if .Values.kubecostAggregator.securityContext }} + securityContext: + {{- toYaml .Values.kubecostAggregator.securityContext | nindent 8 }} + {{- else if .Values.global.securityContext }} + securityContext: + {{- toYaml .Values.global.securityContext | nindent 8 }} + {{ end }} + serviceAccountName: {{ template "aggregator.serviceAccountName" . 
}} + volumes: + {{- $etlBackupBucketSecret := "" }} + {{- if .Values.kubecostModel.federatedStorageConfigSecret }} + {{- $etlBackupBucketSecret = .Values.kubecostModel.federatedStorageConfigSecret }} + {{- end }} + {{- if $etlBackupBucketSecret }} + - name: bucket-config + secret: + defaultMode: 420 + secretName: {{ $etlBackupBucketSecret }} + {{- end }} + containers: + {{- if .Values.kubecostAggregator.jaeger.enabled }} + - name: embedded-jaeger + securityContext: + {{- toYaml .Values.kubecostAggregator.jaeger.containerSecurityContext | nindent 12 }} + image: {{ .Values.kubecostAggregator.jaeger.image }}:{{ .Values.kubecostAggregator.jaeger.imageVersion }} + {{- end }} + - name: aggregator + {{- if .Values.kubecostAggregator.containerSecurityContext }} + securityContext: + {{- toYaml .Values.kubecostAggregator.containerSecurityContext | nindent 12 }} + {{- else if .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml .Values.global.containerSecurityContext | nindent 12 }} + {{ end }} + {{- if .Values.kubecostModel }} + {{- if .Values.kubecostModel.openSourceOnly }} + {{- fail "Kubecost Aggregator cannot be used with open source only" }} + {{- else if .Values.kubecostAggregator.fullImageName }} + image: {{ .Values.kubecostAggregator.fullImageName }} + {{- else if .Values.kubecostModel.fullImageName }} + image: {{ .Values.kubecostModel.fullImageName }} + {{- else if .Values.imageVersion }} + image: {{ .Values.kubecostModel.image }}:{{ .Values.imageVersion }} + {{- else }} + image: {{ .Values.kubecostModel.image }}:prod-{{ $.Chart.AppVersion }} + {{ end }} + {{- else }} + image: gcr.io/kubecost1/cost-model:prod-{{ $.Chart.AppVersion }} + {{ end }} + readinessProbe: + httpGet: + path: /healthz + port: 9004 + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 200 + imagePullPolicy: Always + args: ["waterfowl"] + ports: + - name: tcp-api + containerPort: 9004 + protocol: TCP + resources: + {{ toYaml .Values.kubecostAggregator.resources | nindent 12 }} + volumeMounts: + - name: persistent-configs + mountPath: /var/configs + - name: bucket-config + mountPath: /var/configs/etl + - name: aggregator-storage + mountPath: /var/configs/waterfowl + {{- if .Values.kubecostAggregator.aggregatorDbStorage }} + - name: aggregator-db-storage + mountPath: /var/configs/waterfowl/duckdb + {{- end }} + env: + {{- if and (.Values.prometheus.server.global.external_labels.cluster_id) (not .Values.prometheus.server.clusterIDConfigmap) }} + - name: CLUSTER_ID + value: {{ .Values.prometheus.server.global.external_labels.cluster_id }} + {{- end }} + {{- if .Values.prometheus.server.clusterIDConfigmap }} + - name: CLUSTER_ID + valueFrom: + configMapKeyRef: + name: {{ .Values.prometheus.server.clusterIDConfigmap }} + key: CLUSTER_ID + {{- end }} + {{- if .Values.kubecostAggregator.jaeger.enabled }} + - name: TRACING_URL + value: "http://localhost:14268/api/traces" + {{- end }} + - name: CONFIG_PATH + value: /var/configs/ + - name: ETL_ENABLED + value: "false" # this pod should never run KC's concept of "ETL" + - name: CLOUD_PROVIDER_API_KEY + value: "AIzaSyDXQPG_MHUEy9neR7stolq6l0ujXmjJlvk" # The GCP Pricing API key.This GCP api key is expected to be here and is limited to accessing google's billing API.' + value: "true" # just in case, not sure if necessary + + {{- if $etlBackupBucketSecret }} + # If this isn't set, we pretty much have to be in a read only state, + # initialization will probably fail otherwise. 
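As a reading aid for the new aggregator StatefulSet above, here is a sketch of the values it consumes; the key paths come from the template, while the sizes, storage class names, and secret name are illustrative assumptions rather than chart defaults:

    kubecostAggregator:
      enabled: true
      replicas: 1                        # illustrative
      serviceAccountName: ""             # optional; empty falls back to the cost-analyzer service account
      persistentConfigsStorage:
        storageClass: ""                 # illustrative; empty selects the cluster default
        storageRequest: 1Gi              # illustrative
      aggregatorStorage:
        storageClass: ""
        storageRequest: 32Gi             # illustrative
      aggregatorDbStorage:               # optional block; adds the DuckDB volume at /var/configs/waterfowl/duckdb
        storageClass: ""
        storageRequest: 32Gi             # illustrative
      jaeger:
        enabled: false                   # when true, an embedded-jaeger sidecar and TRACING_URL are added
    kubecostModel:
      federatedStorageConfigSecret: federated-store   # illustrative name; mounted at /var/configs/etl
    prometheus:
      server:
        global:
          external_labels:
            cluster_id: cluster-one      # illustrative; surfaces in the pod as CLUSTER_ID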
+ - name: ETL_BUCKET_CONFIG + {{- if not .Values.kubecostModel.federatedStorageConfigSecret}} + value: "/var/configs/etl/object-store.yaml" + {{- else }} + value: "/var/configs/etl/federated-store.yaml" + - name: FEDERATED_STORE_CONFIG + value: "/var/configs/etl/federated-store.yaml" + - name: FEDERATED_PRIMARY_CLUSTER # this ensures the ingester runs assuming federated primary paths in the bucket + value: "true" + - name: FEDERATED_CLUSTER # this ensures the ingester runs assuming federated primary paths in the bucket + value: "true" + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.kubecostAggregator.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + - name: KUBECOST_NAMESPACE + value: {{ .Release.Namespace }} + + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.kubecostAggregator.priority }} + {{- if .Values.kubecostAggregator.priority.enabled }} + {{- if .Values.kubecostAggregator.priority.name }} + priorityClassName: {{ .Values.kubecostAggregator.priority.name }} + {{- else }} + priorityClassName: {{ template "cost-analyzer.fullname" . }}-aggregator-priority + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.kubecostAggregator.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.kubecostAggregator.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.kubecostAggregator.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-cluster-role-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-cluster-role-template.yaml index e8c2a7a40..94d25aaa0 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-cluster-role-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-cluster-role-template.yaml @@ -29,7 +29,6 @@ rules: - '' resources: - configmaps - - deployments - nodes - pods - events @@ -45,16 +44,6 @@ rules: - get - list - watch - - apiGroups: - - extensions - resources: - - daemonsets - - deployments - - replicasets - verbs: - - get - - list - - watch {{- $isLeaderFollowerEnabled := include "cost-analyzer.leaderFollowerEnabled" . }} {{- if $isLeaderFollowerEnabled }} - apiGroups: @@ -72,6 +61,7 @@ rules: - daemonsets - replicasets verbs: + - get - list - watch - apiGroups: diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml index 62565d5a4..e7166aeca 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-deployment-template.yaml @@ -62,6 +62,9 @@ spec: {{- else if lt $nginxPort 1025 }} securityContext: runAsUser: 0 + {{- else if .Values.global.securityContext }} + securityContext: + {{- toYaml .Values.global.securityContext | nindent 8 }} {{- else }} securityContext: runAsUser: 1001 @@ -71,25 +74,27 @@ spec: restartPolicy: Always serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . 
}} volumes: - {{- if .Values.global.gcpstore.enabled }} + {{- if .Values.global.gcpstore.enabled }} - name: ubbagent-config configMap: name: ubbagent-config - {{- end }} - {{- if .Values.hosted }} + {{- end }} + {{- if .Values.hosted }} - name: config-store secret: defaultMode: 420 secretName: kubecost-thanos - {{- end }} + {{- end }} - name: tmp emptyDir: {} + {{- if .Values.kubecostFrontend.enabled }} - name: nginx-conf configMap: name: nginx-conf items: - key: nginx.conf path: default.conf + {{- end }} {{- if .Values.global.containerSecuritycontext }} - name: var-run emptyDir: { } @@ -135,7 +140,7 @@ spec: secret: secretName: {{ .Values.kubecostProductConfigs.gcpSecretName }} items: - - key: compute-viewer-kubecost-key.json + - key: {{ .Values.kubecostProductConfigs.gcpSecretKeyName | default "compute-viewer-kubecost-key.json" }} path: service-key.json {{- end }} {{- end -}} @@ -243,11 +248,16 @@ spec: - name: oidc-config configMap: name: {{ template "cost-analyzer.fullname" . }}-oidc - {{- if .Values.oidc.secretName }} + {{- if and (not .Values.oidc.existingCustomSecret.enabled) .Values.oidc.secretName }} - name: oidc-client-secret secret: secretName: {{ .Values.oidc.secretName }} {{- end }} + {{- if .Values.oidc.existingCustomSecret.enabled }} + - name: oidc-client-secret + secret: + secretName: {{ .Values.oidc.existingCustomSecret.name }} + {{- end }} {{- end }} {{- end }} {{- if .Values.extraVolumes }} @@ -325,6 +335,21 @@ spec: args: - "--web.listen-address=:{{ .Values.global.gmp.gmpProxy.port }}" - "--query.project-id={{ .Values.global.gmp.gmpProxy.projectId }}" + {{- if .Values.systemProxy.enabled }} + env: + - name: HTTP_PROXY + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: http_proxy + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: HTTPS_PROXY + value: {{ .Values.systemProxy.httpsProxyUrl }} + - name: https_proxy + value: {{ .Values.systemProxy.httpsProxyUrl }} + - name: NO_PROXY + value: {{ .Values.systemProxy.noProxy }} + - name: no_proxy + value: {{ .Values.systemProxy.noProxy }} + {{- end }} ports: - name: web containerPort: {{ .Values.global.gmp.gmpProxy.port | int }} @@ -345,6 +370,10 @@ spec: {{- else }} imagePullPolicy: Always {{- end }} + {{- if .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml .Values.global.containerSecurityContext | nindent 12 -}} + {{- end }} args: - --name - {{ .Values.sigV4Proxy.name }} @@ -361,8 +390,24 @@ spec: ports: - name: aws-sigv4-proxy containerPort: {{ .Values.sigV4Proxy.port | int }} - {{- if .Values.sigV4Proxy.extraEnv }} env: + - name: AGENT_LOCAL_PORT + value: "{{ .Values.sigV4Proxy.port | int }}" + {{- if .Values.systemProxy.enabled }} + - name: HTTP_PROXY + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: http_proxy + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: HTTPS_PROXY + value: {{ .Values.systemProxy.httpsProxyUrl }} + - name: https_proxy + value: {{ .Values.systemProxy.httpsProxyUrl }} + - name: NO_PROXY + value: {{ .Values.systemProxy.noProxy }} + - name: no_proxy + value: {{ .Values.systemProxy.noProxy }} + {{- end }} + {{- if .Values.sigV4Proxy.extraEnv }} {{- toYaml .Values.sigV4Proxy.extraEnv | nindent 10 }} {{- end }} {{- end }} @@ -370,6 +415,20 @@ spec: - name: ubbagent image: gcr.io/kubecost1/gcp-mp/ent/cost-model/ubbagent:1.0 env: + {{- if .Values.systemProxy.enabled }} + - name: HTTP_PROXY + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: http_proxy + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: HTTPS_PROXY + value: {{ 
.Values.systemProxy.httpsProxyUrl }} + - name: https_proxy + value: {{ .Values.systemProxy.httpsProxyUrl }} + - name: NO_PROXY + value: {{ .Values.systemProxy.noProxy }} + - name: no_proxy + value: {{ .Values.systemProxy.noProxy }} + {{- end }} - name: AGENT_CONFIG_FILE value: "/etc/ubbagent/config.yaml" - name: AGENT_LOCAL_PORT @@ -406,10 +465,12 @@ spec: args: {{- toYaml .Values.kubecostModel.extraArgs | nindent 12 }} {{- end }} - {{- if .Values.kubecostModel.securityContext }} securityContext: + {{- if .Values.kubecostModel.securityContext }} {{- toYaml .Values.kubecostModel.securityContext | nindent 12 -}} - {{ end }} + {{- else if .Values.global.containerSecurityContext }} + {{- toYaml .Values.global.containerSecurityContext | nindent 12 -}} + {{- end }} {{- if .Values.kubecostModel.imagePullPolicy }} imagePullPolicy: {{ .Values.kubecostModel.imagePullPolicy }} {{- else }} @@ -643,11 +704,11 @@ spec: value: {{ (quote .Values.kubecostProductConfigs.regionOverrides) }} {{- end }} {{- end }} - - name: REMOTE_WRITE_PASSWORD - value: {{ .Values.remoteWrite.postgres.auth.password }} {{- if .Values.remoteWrite.postgres.enabled }} - name: REMOTE_WRITE_ENABLED value: "true" + - name: REMOTE_WRITE_PASSWORD + value: {{ .Values.remoteWrite.postgres.auth.password }} {{- end }} {{- if .Values.global.thanos.queryServiceBasicAuthSecretName}} - name: MC_BASIC_AUTH_USERNAME @@ -1042,11 +1103,18 @@ spec: configMapKeyRef: name: {{ template "cost-analyzer.fullname" . }} key: kubecost-token + {{- if .Values.kubecostAggregator.enabled }} + - name: WATERFOWL_ENABLED + value: "true" + {{- end }} + {{- if .Values.kubecostFrontend.enabled }} {{- if .Values.kubecostFrontend }} {{- if .Values.kubecostFrontend.fullImageName }} - image: {{ .Values.kubecostFrontend.fullImageName }} {{- else if .Values.imageVersion }} - image: {{ .Values.kubecostFrontend.image }}:{{ .Values.imageVersion }} + {{- else if .Values.kubecostAggregator.enabled }} + - image: {{ .Values.kubecostFrontend.image }}:prod-aggregator-{{ $.Chart.AppVersion }} {{- else }} - image: {{ .Values.kubecostFrontend.image }}:prod-{{ $.Chart.AppVersion }} {{ end }} @@ -1070,7 +1138,10 @@ spec: {{- if .Values.kubecostFrontend.securityContext }} securityContext: {{- toYaml .Values.kubecostFrontend.securityContext | nindent 12 }} - {{ end }} + {{- else if .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml .Values.global.containerSecurityContext | nindent 12 }} + {{- end }} volumeMounts: - name: tmp mountPath: /tmp @@ -1115,6 +1186,7 @@ spec: securityContext: {{- toYaml .Values.global.containerSecuritycontext | nindent 12 }} {{- end }} + {{ end }} {{- if .Values.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets | indent 2 }} diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml index 713c6a98d..3894f70ac 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-frontend-config-map-template.yaml @@ -1,3 +1,4 @@ +{{- if .Values.kubecostFrontend.enabled }} {{- if and (not .Values.agent) (not .Values.cloudAgent) }} {{- $serviceName := include "cost-analyzer.serviceName" . 
-}} {{- $nginxPort := .Values.service.targetPort | default 9090 -}} @@ -107,6 +108,16 @@ data: } {{- end }} + {{- if .Values.kubecostAggregator.enabled }} + upstream aggregator { + server {{ .Release.Name }}-aggregator.{{ .Release.Namespace }}:9004; + } + {{- end }} + {{- if .Values.kubecostAggregator.cloudCost.enabled }} + upstream cloudCost { + server {{ template "cloudCost.fullname" . }}.{{ .Release.Namespace }}:9005; + } + {{- end }} server { server_name _; root /var/www; @@ -301,7 +312,13 @@ data: } {{- end }} + # Query Service Replicas (QSR) proxy {{- if and (.Values.kubecostDeployment) (.Values.kubecostDeployment.queryServiceReplicas) (gt (.Values.kubecostDeployment.queryServiceReplicas | toString | atoi) 0) }} + + {{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostDeployment) (.Values.kubecostAggregator) .Values.kubecostAggregator.enabled }} + {{- fail "Query Service Replicas should not be used at the same time as the Kubecost Aggregator" }} + {{- end }} + location /model/allocation { proxy_connect_timeout 600; proxy_send_timeout 600; @@ -325,7 +342,338 @@ data: proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + + # to get memory profile from query service need to prefix all request by queryservice/ + # for example if you want heap dump from query service end point should be + # /model/queryservice/debug/pprof/heap to get queryservice heap dumps + location ~ /model/queryservice/(.*)$ { + proxy_connect_timeout 600; + proxy_send_timeout 600; + proxy_read_timeout 600; + proxy_pass http://queryservice/$1; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } {{- end }} + +{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostDeployment) (.Values.kubecostAggregator) .Values.kubecostAggregator.enabled }} + + # Aggregator proxy + {{- if and (.Values.kubecostDeployment) (.Values.kubecostDeployment.queryServiceReplicas) (gt (.Values.kubecostDeployment.queryServiceReplicas | toString | atoi) 0) }} + {{- fail "The Kubecost Aggregator should not be used at the same time as Query Service Replicas" }} + {{- end }} + + location = /model/allocation { + proxy_read_timeout 300; + proxy_pass http://aggregator/allocation; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + {{- if not .Values.kubecostFrontend.trendsDisabled }} + location = /model/allocation/trends { + proxy_read_timeout 300; + proxy_pass http://aggregator/allocation/trends; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + {{ end }} + location = /model/allocation/view { + proxy_read_timeout 300; + proxy_pass http://aggregator/allocation/view; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/allocation/summary { + proxy_read_timeout 300; + proxy_pass http://aggregator/allocation/summary; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/allocation/summary/topline { + proxy_read_timeout 300; + proxy_pass 
http://aggregator/allocation/summary/topline; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/assets { + proxy_read_timeout 300; + proxy_pass http://aggregator/assets; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/assets/topline { + proxy_read_timeout 300; + proxy_pass http://aggregator/assets/topline; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/assets/graph { + proxy_read_timeout 300; + proxy_pass http://aggregator/assets/graph; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/assets/totals { + return 501 "Aggregator does not support this endpoint."; + } + location = /model/assets/diff { + return 501 "Aggregator does not support this endpoint."; + } + location = /model/assets/breakdown { + return 501 "Aggregator does not support this endpoint."; + } + location = /model/savings/requestSizingV2 { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings/requestSizingV2; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/savings/requestSizingV2/topline { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings/requestSizingV2/topline; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost { + proxy_read_timeout 300; + proxy_pass http://aggregator/cloudCost; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/view/graph { + proxy_read_timeout 300; + proxy_pass http://aggregator/cloudCost/view/graph; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/view/totals { + proxy_read_timeout 300; + proxy_pass http://aggregator/cloudCost/view/totals; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/view/table { + proxy_read_timeout 300; + proxy_pass http://aggregator/cloudCost/view/table; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/clusters/status { + proxy_read_timeout 300; + proxy_pass http://aggregator/clusters/status; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/savings { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header 
X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/savings/abandonedWorkloads { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings/abandonedWorkloads; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/savings/abandonedWorkloads/topline { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings/abandonedWorkloads/topline; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/savings/unclaimedVolumes { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings/unclaimedVolumes; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/savings/localLowDisks { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings/localLowDisks; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/savings/persistentVolumeSizing { + proxy_read_timeout 300; + proxy_pass http://aggregator/savings/persistentVolumeSizing; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/reports/allocation { + proxy_read_timeout 300; + proxy_pass http://aggregator/reports/allocation; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/reports/asset { + proxy_read_timeout 300; + proxy_pass http://aggregator/reports/asset; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/reports/advanced { + proxy_read_timeout 300; + proxy_pass http://aggregator/reports/advanced; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/reports/cloudCost { + proxy_read_timeout 300; + proxy_pass http://aggregator/reports/cloudCost; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/reports/group { + proxy_read_timeout 300; + proxy_pass http://aggregator/reports/group; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + # this is a special case to handle /reports/group/:group in the Kubecost Aggregator. prior to aggregator, this endpoint + # was handled by /model/, so no special case proxies were required. 
without this, /model/reports/groups/?foo=bar + # will be directed to /reports/groups?foo=bar (note the missing /model prefix) + location ~ ^/model/reports/group/ { + proxy_read_timeout 300; + proxy_pass http://aggregator/reports/group/$is_args$args; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/budget { + proxy_read_timeout 300; + proxy_pass http://aggregator/budget; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/budgets { + proxy_read_timeout 300; + proxy_pass http://aggregator/budgets; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + {{- end }} + location = /model/hideDiagnostics { + default_type text/html; + {{- if .Values.kubecostFrontend.hideDiagnostics }} + return 200 'true'; + {{- else }} + return 200 'false'; + {{- end }} + } + + {{- if .Values.kubecostAggregator.cloudCost.enabled }} + location = /model/cloudCost/status { + proxy_read_timeout 300; + proxy_pass http://cloudCost/cloudCost/status; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/rebuild { + proxy_read_timeout 300; + proxy_pass http://cloudCost/cloudCost/rebuild; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/repair { + proxy_read_timeout 300; + proxy_pass http://cloudCost/cloudCost/repair; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/integration/export { + proxy_read_timeout 300; + proxy_pass http://cloudCost/cloudCost/integration/export; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/integration/enable { + proxy_read_timeout 300; + proxy_pass http://cloudCost/cloudCost/integration/enable; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location = /model/cloudCost/integration/disable { + proxy_read_timeout 300; + proxy_pass http://cloudCost/cloudCost/integration/disable; + proxy_redirect off; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + {{- end }} + + + {{- if .Values.kubecostFrontend.trendsDisabled }} location /model/allocation/trends { return 204 'endpoint disabled'; @@ -333,3 +681,4 @@ data: {{ end }} } {{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/cost-analyzer-oidc-config-map-template.yaml b/charts/kubecost/cost-analyzer/templates/cost-analyzer-oidc-config-map-template.yaml index a97df0f2b..7a2c2e69f 100644 --- a/charts/kubecost/cost-analyzer/templates/cost-analyzer-oidc-config-map-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/cost-analyzer-oidc-config-map-template.yaml @@ -14,7 +14,11 @@ data: 
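The frontend nginx template above only renders the aggregator and cloudCost upstreams plus their /model/* proxy locations when those components are enabled, and it deliberately fails rendering when query-service replicas are configured alongside the aggregator. A values combination that exercises the new routing, with illustrative secret names and the key paths taken from the templates:

    kubecostAggregator:
      enabled: true                  # emits "upstream aggregator ...:9004" and the /model/ locations
      cloudCost:
        enabled: true                # emits "upstream cloudCost ...:9005" and the /model/cloudCost/* locations
    kubecostProductConfigs:
      cloudIntegrationSecret: cloud-integration   # illustrative; the cloud-cost Deployment fails rendering without it
    kubecostDeployment:
      queryServiceReplicas: 0        # must stay 0; a value > 0 together with the aggregator fails rendering
    kubecostFrontend:
      enabled: true                  # the whole config map is now gated on this flag
      trendsDisabled: false          # when true, /model/allocation/trends answers 204
      hideDiagnostics: false         # served back verbatim by /model/hideDiagnostics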
"enabled" : {{ .Values.oidc.enabled }}, "useIDToken" : {{ .Values.oidc.useIDToken | default "false" }}, "clientID" : "{{ .Values.oidc.clientID }}", + {{- if .Values.oidc.existingCustomSecret.enabled }} + "secretName" : "{{ .Values.oidc.existingCustomSecret.name }}", + {{- else }} "secretName" : "{{ .Values.oidc.secretName }}", + {{- end }} "authURL" : "{{ .Values.oidc.authURL }}", "loginRedirectURL" : "{{ .Values.oidc.loginRedirectURL }}", "discoveryURL" : "{{ .Values.oidc.discoveryURL }}", @@ -41,4 +45,4 @@ data: } } {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/templates/etl-utils-deployment.yaml b/charts/kubecost/cost-analyzer/templates/etl-utils-deployment.yaml new file mode 100644 index 000000000..9e5b5b2cd --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/etl-utils-deployment.yaml @@ -0,0 +1,111 @@ +{{- if .Values.etlUtils.enabled }} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "etlUtils.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{ include "etlUtils.commonLabels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{ include "etlUtils.selectorLabels" . | nindent 6 }} + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "etlUtils.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app: {{ template "etlUtils.name" . }} + spec: + restartPolicy: Always + volumes: + {{- if .Values.etlUtils.thanosSourceBucketSecret }} + - name: etl-bucket-config + secret: + defaultMode: 420 + secretName: {{ .Values.etlUtils.thanosSourceBucketSecret }} + {{- end }} + {{- if .Values.kubecostModel.federatedStorageConfigSecret }} + - name: federated-storage-config + secret: + defaultMode: 420 + secretName: {{ .Values.kubecostModel.federatedStorageConfigSecret }} + {{- end }} + serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . }} + containers: + - name: {{ template "etlUtils.name" . 
}} + {{- if .Values.kubecostModel }} + {{- if .Values.kubecostModel.openSourceOnly }} + {{- fail "ETL Utils cannot be used with open source only" }} + {{- else if .Values.etlUtils.fullImageName }} + image: {{ .Values.etlUtils.fullImageName }} + {{- else if .Values.kubecostModel.fullImageName }} + image: {{ .Values.kubecostModel.fullImageName }} + {{- else if .Values.imageVersion }} + image: {{ .Values.kubecostModel.image }}:{{ .Values.imageVersion }} + {{- else }} + image: {{ .Values.kubecostModel.image }}:prod-{{ $.Chart.AppVersion }} + {{ end }} + {{- else }} + image: gcr.io/kubecost1/cost-model:prod-{{ $.Chart.AppVersion }} + {{ end }} + readinessProbe: + httpGet: + path: /healthz + port: 9006 + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 200 + livenessProbe: + httpGet: + path: /healthz + port: 9006 + initialDelaySeconds: 10 + periodSeconds: 5 + imagePullPolicy: Always + args: ["etl-utils"] + ports: + - name: api + containerPort: 9006 + protocol: TCP + resources: + {{- toYaml .Values.etlUtils.resources | nindent 12 }} + volumeMounts: + {{- if .Values.etlUtils.thanosSourceBucketSecret }} + - name: etl-bucket-config + mountPath: /var/configs/etl + readOnly: true + {{- end }} + env: + - name: CONFIG_PATH + value: /var/configs/ + {{- if .Values.etlUtils.thanosSourceBucketSecret }} + - name: ETL_BUCKET_CONFIG + value: "/var/configs/etl/object-store.yaml" + {{- end}} + {{- range $key, $value := .Values.etlUtils.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- with .Values.etlUtils.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.etlUtils.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.etlUtils.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/etl-utils-service.yaml b/charts/kubecost/cost-analyzer/templates/etl-utils-service.yaml new file mode 100644 index 000000000..8296d7faa --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/etl-utils-service.yaml @@ -0,0 +1,18 @@ +{{- if .Values.etlUtils.enabled }} + +kind: Service +apiVersion: v1 +metadata: + name: {{ template "etlUtils.serviceName" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "etlUtils.commonLabels" . | nindent 4 }} +spec: + selector: +{{ include "etlUtils.selectorLabels" . | nindent 4 }} + type: "ClusterIP" + ports: + - name: api + port: 9006 + targetPort: 9006 +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/federator-deployment-template.yaml b/charts/kubecost/cost-analyzer/templates/federator-deployment-template.yaml index c3f73f21b..095644355 100644 --- a/charts/kubecost/cost-analyzer/templates/federator-deployment-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/federator-deployment-template.yaml @@ -24,6 +24,10 @@ spec: {{- toYaml . 
| nindent 8 }} {{- end }} spec: + {{- if .Values.global.securityContext }} + securityContext: + {{- toYaml .Values.global.securityContext | nindent 8 }} + {{- end }} containers: - name: federator {{- if .Values.kubecostModel }} @@ -38,6 +42,10 @@ spec: image: gcr.io/kubecost1/cost-model:prod-{{ $.Chart.AppVersion }} {{- end }} imagePullPolicy: Always + {{- if .Values.global.containerSecurityContext }} + securityContext: + {{- toYaml .Values.global.containerSecurityContext | nindent 12 -}} + {{- end }} args: ["federator"] ports: - name: tcp-model @@ -46,9 +54,11 @@ spec: volumeMounts: - name: federator-config mountPath: /var/configs/federator + {{- if .Values.kubecostModel.federatedStorageConfigSecret }} - name: federated-storage-config mountPath: /var/configs/etl/federated readOnly: true + {{- end }} {{- if .Values.federatedETL.federator.extraVolumeMounts }} {{- toYaml .Values.federatedETL.federator.extraVolumeMounts | nindent 12 }} {{- end }} @@ -73,6 +83,20 @@ spec: {{- if .Values.federatedETL.federator.extraEnv }} {{- toYaml .Values.federatedETL.federator.extraEnv | nindent 12 }} {{- end }} + {{- if .Values.systemProxy.enabled }} + - name: HTTP_PROXY + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: http_proxy + value: {{ .Values.systemProxy.httpProxyUrl }} + - name: HTTPS_PROXY + value: {{ .Values.systemProxy.httpsProxyUrl }} + - name: https_proxy + value: {{ .Values.systemProxy.httpsProxyUrl }} + - name: NO_PROXY + value: {{ .Values.systemProxy.noProxy }} + - name: no_proxy + value: {{ .Values.systemProxy.noProxy }} + {{- end }} restartPolicy: Always serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . }} volumes: diff --git a/charts/kubecost/cost-analyzer/templates/kubecost-metrics-deployment-template.yaml b/charts/kubecost/cost-analyzer/templates/kubecost-metrics-deployment-template.yaml index 4056b6692..b0562f895 100644 --- a/charts/kubecost/cost-analyzer/templates/kubecost-metrics-deployment-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/kubecost-metrics-deployment-template.yaml @@ -75,7 +75,7 @@ spec: secret: secretName: {{ .Values.kubecostProductConfigs.gcpSecretName }} items: - - key: compute-viewer-kubecost-key.json + - key: {{ .Values.kubecostProductConfigs.gcpSecretKeyName | default "compute-viewer-kubecost-key.json" }} path: service-key.json {{- end }} {{- if .Values.kubecostProductConfigs.serviceKeySecretName }} diff --git a/charts/kubecost/cost-analyzer/templates/kubecost-oidc-secret-template.yaml b/charts/kubecost/cost-analyzer/templates/kubecost-oidc-secret-template.yaml index 28c79e51a..381514512 100644 --- a/charts/kubecost/cost-analyzer/templates/kubecost-oidc-secret-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/kubecost-oidc-secret-template.yaml @@ -1,5 +1,5 @@ {{- if .Values.oidc }} -{{- if .Values.oidc.secretName }} +{{- if and (not .Values.oidc.existingCustomSecret.enabled) .Values.oidc.secretName }} {{- if .Values.oidc.clientSecret }} apiVersion: v1 kind: Secret @@ -13,4 +13,4 @@ stringData: clientSecret: {{ .Values.oidc.clientSecret }} {{- end -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/templates/mimir-proxy-configmap-template.yaml b/charts/kubecost/cost-analyzer/templates/mimir-proxy-configmap-template.yaml index ded965a8f..08b93ee84 100644 --- a/charts/kubecost/cost-analyzer/templates/mimir-proxy-configmap-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/mimir-proxy-configmap-template.yaml @@ -12,7 +12,10 @@ data: location / { 
proxy_pass {{ .Values.global.mimirProxy.mimirEndpoint }}; proxy_set_header X-Scope-OrgID "{{ .Values.global.mimirProxy.orgIdentifier }}"; + {{- if .Values.global.mimirProxy.basicAuth }} + proxy_set_header Authorization "Basic {{ (printf "%s:%s" .Values.global.mimirProxy.basicAuth.username .Values.global.mimirProxy.basicAuth.password) | b64enc }}"; + {{- end }} } } {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/model-ingress-template.yaml b/charts/kubecost/cost-analyzer/templates/model-ingress-template.yaml new file mode 100644 index 000000000..55243eedb --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/model-ingress-template.yaml @@ -0,0 +1,66 @@ +{{- if .Values.kubecostModel.ingress -}} +{{- if .Values.kubecostModel.ingress.enabled -}} +{{- $fullName := include "cost-analyzer.fullname" . -}} +{{- $serviceName := include "cost-analyzer.serviceName" . -}} +{{- $ingressPaths := .Values.kubecostModel.ingress.paths -}} +{{- $ingressPathType := .Values.kubecostModel.ingress.pathType -}} +{{- $apiV1 := false -}} +{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare "^1.19-0" .Capabilities.KubeVersion.GitVersion) }} +{{- $apiV1 = true -}} +apiVersion: networking.k8s.io/v1 +{{ else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: + name: {{ $fullName }}-model + namespace: {{ .Release.Namespace }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- with .Values.kubecostModel.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.kubecostModel.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.kubecostModel.ingress.className }} + ingressClassName: {{ .Values.kubecostModel.ingress.className }} +{{- end }} +{{- if .Values.kubecostModel.ingress.tls }} + tls: + {{- range .Values.kubecostModel.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.kubecostModel.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + {{- range $ingressPaths }} + {{- if $apiV1 }} + - path: {{ . }} + pathType: {{ $ingressPathType }} + backend: + service: + name: {{ $serviceName }} + port: + name: tcp-model + {{- else }} + - path: {{ . }} + backend: + serviceName: {{ $serviceName }} + servicePort: tcp-model + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/templates/query-service-deployment-template.yaml b/charts/kubecost/cost-analyzer/templates/query-service-deployment-template.yaml index f3de47226..06188ff9a 100644 --- a/charts/kubecost/cost-analyzer/templates/query-service-deployment-template.yaml +++ b/charts/kubecost/cost-analyzer/templates/query-service-deployment-template.yaml @@ -6,7 +6,7 @@ metadata: name: {{ template "query-service.fullname" . }} namespace: {{ .Release.Namespace }} labels: - {{ include "query-service.commonLabels" . | nindent 4 }} + {{- include "query-service.commonLabels" . 
| nindent 4 }} spec: replicas: {{ .Values.kubecostDeployment.queryServiceReplicas }} serviceName: "query-service" @@ -44,7 +44,13 @@ spec: {{- end }} spec: restartPolicy: Always - + {{- if .Values.kubecostDeployment.queryService.securityContext }} + securityContext: + {{- toYaml .Values.kubecostDeployment.queryService.securityContext | nindent 8 }} + {{- else if .Values.global.securityContext }} + securityContext: + {{- toYaml .Values.global.securityContext | nindent 8 }} + {{- end }} serviceAccountName: {{ template "query-service.serviceAccountName" . }} volumes: {{- $etlBackupBucketSecret := "" }} @@ -68,7 +74,8 @@ spec: {{- end }} initContainers: - name: config-db-perms-fix - image: busybox + image: {{ .Values.kubecostDeployment.queryService.initImage.repository | default "busybox"}}:{{ .Values.kubecostDeployment.queryService.initImage.tag | default "stable"}} + imagePullPolicy: {{ .Values.kubecostDeployment.queryService.initImage.pullPolicy | default "IfNotPresent"}} command: ["sh", "-c", "/bin/chmod -R 777 /var/configs && /bin/chmod -R 777 /var/db"] volumeMounts: - name: persistent-configs @@ -100,6 +107,12 @@ spec: periodSeconds: 10 failureThreshold: 200 imagePullPolicy: Always + securityContext: + {{- if .Values.kubecostDeployment.queryService.containerSecurityContext }} + {{- toYaml .Values.kubecostDeployment.queryService.containerSecurityContext | nindent 12 -}} + {{- else if .Values.global.containerSecurityContext }} + {{- toYaml .Values.global.containerSecurityContext | nindent 12 -}} + {{- end }} args: ["query-service"] ports: - name: tcp-model diff --git a/charts/kubecost/cost-analyzer/templates/tests/_helpers.tpl b/charts/kubecost/cost-analyzer/templates/tests/_helpers.tpl new file mode 100644 index 000000000..8c1e53a56 --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/tests/_helpers.tpl @@ -0,0 +1,5 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "kubecost.test.annotations" -}} +helm.sh/hook: test +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/templates/tests/basic-health.yaml b/charts/kubecost/cost-analyzer/templates/tests/basic-health.yaml new file mode 100644 index 000000000..b91bc12eb --- /dev/null +++ b/charts/kubecost/cost-analyzer/templates/tests/basic-health.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: basic-health + namespace: {{ .Release.Namespace }} + annotations: + {{- include "kubecost.test.annotations" . | nindent 4 }} +spec: + serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . }} + restartPolicy: Never + containers: + - name: test-kubecost + image: alpine/k8s:1.26.9 + command: + - /bin/sh + args: + - -c + - >- + svc=$(kubectl -n {{ .Release.Namespace }} get svc -l app.kubernetes.io/name=cost-analyzer -o json | jq -r .items[0].metadata.name); + echo Getting current Kubecost state.; + response=$(curl -sL http://${svc}:9090/model/getConfigs); + code=$(echo ${response} | jq .code); + if [ "$code" -eq 200 ]; then + echo "Got Kubecost working configuration. Successful." + exit 0 + else + echo "Failed to fetch Kubecost configuration. Response was $response" + exit 1 + fi diff --git a/charts/kubecost/cost-analyzer/values-agent.yaml b/charts/kubecost/cost-analyzer/values-agent.yaml index da097271f..4f790b455 100644 --- a/charts/kubecost/cost-analyzer/values-agent.yaml +++ b/charts/kubecost/cost-analyzer/values-agent.yaml @@ -72,10 +72,11 @@ prometheus: action: keep regex: {{ template "cost-analyzer.networkCostsName" . 
}} server: + retention: 50h + # retentionSize: 1Gi extraArgs: storage.tsdb.min-block-duration: 2h storage.tsdb.max-block-duration: 2h - storage.tsdb.retention: 10h securityContext: runAsNonRoot: true runAsUser: 1001 diff --git a/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml b/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml index 58b773522..d68b95675 100644 --- a/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml +++ b/charts/kubecost/cost-analyzer/values-eks-cost-monitoring.yaml @@ -33,7 +33,7 @@ networkPolicy: podSecurityPolicy: enabled: false -# Enable this flag if you need to install with specfic image tags +# Enable this flag if you need to install with specific image tags # imageVersion: prod-1.97.0 kubecostFrontend: diff --git a/charts/kubecost/cost-analyzer/values-thanos.yaml b/charts/kubecost/cost-analyzer/values-thanos.yaml index a1dd98ba9..b48c53e70 100644 --- a/charts/kubecost/cost-analyzer/values-thanos.yaml +++ b/charts/kubecost/cost-analyzer/values-thanos.yaml @@ -7,7 +7,7 @@ global: # will greatly assist in reduction memory bloat in query. kubecostModel: maxQueryConcurrency: 5 - # This configuration is applied to thanos only. Expresses the resolution to + # This configuration is applied to thanos only. Expresses the resolution to # use for longer query ranges. Options: raw, 5m, 1h - Default: raw maxSourceResolution: 5m @@ -30,8 +30,11 @@ prometheus: - name: thanos-sidecar image: thanosio/thanos:v0.29.0 securityContext: - runAsNonRoot: true - runAsUser: 1001 + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL args: - sidecar - --log.level=debug @@ -62,7 +65,7 @@ prometheus: subPath: "" - name: object-store-volume mountPath: /etc/config - + thanos: store: enabled: true @@ -73,10 +76,10 @@ thanos: value: "100" - name: GODEBUG value: "madvdontneed=1" - resources: + resources: requests: memory: "2.5Gi" - query: + query: enabled: true timeout: 3m # Maximum number of queries processed concurrently by query node. @@ -99,7 +102,7 @@ thanos: compressResponses: true # Downstream Tripper Configuration downstreamTripper: - enabled: true + enabled: true idleConnectionTimeout: 90s responseHeaderTimeout: 2m tlsHandshakeTimeout: 10s @@ -108,10 +111,10 @@ thanos: maxIdleConnectionsPerHost: 100 maxConnectionsPerHost: 0 # Response Cache Configuration - # Configure either a max size constraint or max items. + # Configure either a max size constraint or max items. responseCache: enabled: true - # Maximum memory size of the cache in bytes. A unit suffix (KB, MB, GB) may be applied. + # Maximum memory size of the cache in bytes. A unit suffix (KB, MB, GB) may be applied. maxSize: 1.25GB # Maximum number of entries in the cache. maxSizeItems: 0 @@ -128,7 +131,7 @@ thanos: # Thanos Sidecar Service Discovery # Disabling removes the prometheus sidecar from querier store discovery. This ensures - # that all clusters read from the same data in remote store. + # that all clusters read from the same data in remote store. 
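
The agent profile above now pins `prometheus.server.retention` instead of the old `storage.tsdb.retention` extra arg. A hedged values sketch of the same idea for a custom values file, assuming the bundled Prometheus is in use; the `retentionSize` figure is a placeholder, and retention should stay at or above the window the ETL keeps (see the `etlHourlyStoreDurationHours` note later in this diff):

```yaml
prometheus:
  server:
    retention: 50h          # keep at least as many hours as kubecostModel.etlHourlyStoreDurationHours
    # retentionSize: 10Gi   # optional size cap; placeholder value, size it above the data written in that window
    extraArgs:
      storage.tsdb.min-block-duration: 2h
      storage.tsdb.max-block-duration: 2h
```
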
sidecar: enabled: true bucket: diff --git a/charts/kubecost/cost-analyzer/values.yaml b/charts/kubecost/cost-analyzer/values.yaml index 9f9bd0076..f7e6d35f6 100644 --- a/charts/kubecost/cost-analyzer/values.yaml +++ b/charts/kubecost/cost-analyzer/values.yaml @@ -1,9 +1,9 @@ global: # zone: cluster.local (use only if your DNS server doesn't live in the same zone as kubecost) prometheus: - enabled: true # If false, Prometheus will not be installed -- Warning: Before changing this setting, please read to understand this setting https://docs.kubecost.com/install-and-configure/install/custom-prom - fqdn: http://cost-analyzer-prometheus-server.default.svc #example address of a prometheus to connect to. Include protocol (http:// or https://) Ignored if enabled: true - # insecureSkipVerify : false # If true, kubecost will not check the TLS cert of prometheus + enabled: true # If false, Prometheus will not be installed -- Warning: Before changing this setting, please read to understand this setting https://docs.kubecost.com/install-and-configure/install/custom-prom + fqdn: http://cost-analyzer-prometheus-server.default.svc # example address of a prometheus to connect to. Include protocol (http:// or https://) Ignored if enabled: true + # insecureSkipVerify: false # If true, kubecost will not check the TLS cert of prometheus # queryServiceBasicAuthSecretName: dbsecret # kubectl create secret generic dbsecret -n kubecost --from-file=USERNAME --from-file=PASSWORD # queryServiceBearerTokenSecretName: mcdbsecret # kubectl create secret generic mcdbsecret -n kubecost --from-file=TOKEN @@ -13,13 +13,13 @@ global: # queryService: http://kubecost-thanos-query-frontend-http.kubecost:{{ .Values.thanos.queryFrontend.http.port }} # an address of the thanos query-frontend endpoint, if different from installed thanos # queryServiceBasicAuthSecretName: mcdbsecret # kubectl create secret generic mcdbsecret -n kubecost --from-file=USERNAME --from-file=PASSWORD <---enter basic auth credentials like that # queryServiceBearerTokenSecretName mcdbsecret # kubectl create secret generic mcdbsecret -n kubecost --from-file=TOKEN - # queryOffset: 3h # The offset to apply to all thanos queries in order to achieve syncronization on all cluster block stores + # queryOffset: 3h # The offset to apply to all thanos queries in order to achieve synchronization on all cluster block stores grafana: - enabled: true # If false, Grafana will not be installed - domainName: cost-analyzer-grafana.default.svc #example grafana domain Ignored if enabled: true - scheme: "http" # http or https, for the domain name above. - proxy: true # If true, the kubecost frontend will route to your grafana through its service endpoint + enabled: true # If false, Grafana will not be installed + domainName: cost-analyzer-grafana.default.svc # example grafana domain Ignored if enabled: true + scheme: "http" # http or https, for the domain name above. + proxy: true # If true, the kubecost frontend will route to your grafana through its service endpoint # fqdn: cost-analyzer-grafana.default.svc # Enable only when you are using GCP Marketplace ENT listing. Learn more at https://console.cloud.google.com/marketplace/product/kubecost-public/kubecost-ent @@ -28,25 +28,25 @@ global: # Google Cloud Managed Service for Prometheus gmp: - # Remember to set up these parameters when install the Kubecost Helm chart with `global.gmp.enabled=true` if you want to use GMP self-deployed collection (Recommended) to ultilize Kubecost scrape configs. 
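
The `global.prometheus` block shown above is what points Kubecost at an existing Prometheus instead of installing the bundled one. A minimal sketch with a hypothetical in-cluster Prometheus service; the linked custom-prom documentation should be read before flipping `enabled`:

```yaml
global:
  prometheus:
    enabled: false                                   # do not install the bundled Prometheus
    fqdn: http://prometheus-server.monitoring.svc    # hypothetical existing Prometheus, protocol included
    # insecureSkipVerify: true                       # only if its TLS certificate cannot be verified
```
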
+ # Remember to set up these parameters when install the Kubecost Helm chart with `global.gmp.enabled=true` if you want to use GMP self-deployed collection (Recommended) to utilize Kubecost scrape configs. # If enabling GMP, it is highly recommended to utilize Google's distribution of Prometheus. # Learn more at https://cloud.google.com/stackdriver/docs/managed-prometheus/setup-unmanaged # --set prometheus.server.image.repository="gke.gcr.io/prometheus-engine/prometheus" \ # --set prometheus.server.image.tag="v2.35.0-gmp.2-gke.0" - enabled: false # If true, kubecost will be configured to use GMP Prometheus image and query from Google Cloud Managed Service for Prometheus. - prometheusServerEndpoint: http://localhost:8085/ # The prometheus service endpoint used by kubecost. The calls are forwarded through the GMP Prom proxy side car to the GMP database. + enabled: false # If true, kubecost will be configured to use GMP Prometheus image and query from Google Cloud Managed Service for Prometheus. + prometheusServerEndpoint: http://localhost:8085/ # The prometheus service endpoint used by kubecost. The calls are forwarded through the GMP Prom proxy side car to the GMP database. gmpProxy: enabled: false - image: gke.gcr.io/prometheus-engine/frontend:v0.4.1-gke.0 # GMP Prometheus proxy image that serve as an endpoint to query metrics from GMP + image: gke.gcr.io/prometheus-engine/frontend:v0.4.1-gke.0 # GMP Prometheus proxy image that serve as an endpoint to query metrics from GMP imagePullPolicy: Always name: gmp-proxy port: 8085 - projectId: YOUR_PROJECT_ID # example GCP project ID + projectId: YOUR_PROJECT_ID # example GCP project ID # Amazon Managed Service for Prometheus amp: enabled: false # If true, kubecost will be configured to remote_write and query from Amazon Managed Service for Prometheus. - prometheusServerEndpoint: https://localhost:8085// # The prometheus service endpoint used by kubecost. The calls are forwarded through the SigV4Proxy side car to the AMP workspace. + prometheusServerEndpoint: https://localhost:8085/workspaces// # The prometheus service endpoint used by kubecost. The calls are forwarded through the SigV4Proxy side car to the AMP workspace. remoteWriteService: https://aps-workspaces.us-west-2.amazonaws.com/workspaces//api/v1/remote_write # The remote_write endpoint for the AMP workspace. sigv4: region: us-west-2 @@ -65,9 +65,11 @@ global: name: mimir-proxy image: nginxinc/nginx-unprivileged port: 8085 - mimirEndpoint: $mimir_endpoint #Your Mimir query endpoint. If your Mimir query endpoint is http://example.com/prometheus, replace $mimir_endpoint with http://example.com/ - orgIdentifier: $your_tenant_ID #Your Grafana Mimir tenant ID - + mimirEndpoint: $mimir_endpoint # Your Mimir query endpoint. If your Mimir query endpoint is http://example.com/prometheus, replace $mimir_endpoint with http://example.com/ + orgIdentifier: $your_tenant_ID # Your Grafana Mimir tenant ID + # basicAuth: + # username: user + # password: pwd notifications: # Kubecost alerting configuration @@ -131,14 +133,14 @@ global: # - type: diagnostic # Alerts when kubecost is unable to compute costs - ie: Prometheus unreachable # window: 10m - alertmanager: # Supply an alertmanager FQDN to receive notifications from the app. - enabled: false # If true, allow kubecost to write to your alertmanager - fqdn: http://cost-analyzer-prometheus-server.default.svc #example fqdn. Ignored if prometheus.enabled: true + alertmanager: # Supply an alertmanager FQDN to receive notifications from the app. 
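
The new `basicAuth` keys under `global.mimirProxy` feed the nginx template earlier in this diff, which base64-encodes `username:password` into an `Authorization: Basic ...` header via `b64enc`. A hedged override sketch with placeholder endpoint, tenant, and credentials (note the credentials live in values, not in a Secret):

```yaml
global:
  mimirProxy:
    mimirEndpoint: http://mimir.example.com/   # placeholder; base URL without the /prometheus suffix
    orgIdentifier: tenant-1                    # placeholder Grafana Mimir tenant ID
    basicAuth:
      username: kubecost                       # placeholder credentials
      password: changeme
```
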
+ enabled: false # If true, allow kubecost to write to your alertmanager + fqdn: http://cost-analyzer-prometheus-server.default.svc # example fqdn. Ignored if prometheus.enabled: true - # Set saved Cost Allocation report(s) accessible from /reports - # Ref: http://docs.kubecost.com/saved-reports + # Set saved Cost Allocation report(s) accessible from /reports + # Ref: http://docs.kubecost.com/saved-reports savedReports: - enabled: false # If true, overwrites report parameters set through UI + enabled: false # If true, overwrites report parameters set through UI reports: - title: "Example Saved Report 0" window: "today" @@ -170,18 +172,18 @@ global: chartDisplay: "category" idle: "hide" rate: "daily" - accumulate: true # entire window resolution - filters: [] # if no filters, specify empty array + accumulate: true # entire window resolution + filters: [] # if no filters, specify empty array # Set saved Asset report(s) accessible from /reports # Ref: http://docs.kubecost.com/saved-reports assetReports: - enabled: false # If true, overwrites report parameters set through UI + enabled: false # If true, overwrites report parameters set through UI reports: - title: "Example Asset Report 0" window: "today" aggregateBy: "type" - accumulate: false # daily resolution + accumulate: false # daily resolution filters: - property: "cluster" value: "cluster-one" @@ -189,7 +191,7 @@ global: # Set saved Advanced report(s) accessible from /reports # Ref: http://docs.kubecost.com/saved-reports advancedReports: - enabled: false # If true, overwrites report parameters set through UI + enabled: false # If true, overwrites report parameters set through UI reports: - title: "Example Advanced Report 0" window: "7d" @@ -203,7 +205,7 @@ global: # Set saved Cloud Cost report(s) accessible from /reports # Ref: http://docs.kubecost.com/saved-reports cloudCostReports: - enabled: false # If true, overwrites report parameters set through UI + enabled: false # If true, overwrites report parameters set through UI reports: - title: "Cloud Cost Report 0" window: "today" @@ -217,11 +219,24 @@ global: # iam.amazonaws.com/role: role-arn additionalLabels: {} - containerSecuritycontext: {} - # readOnlyRootFilesystem: true + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + fsGroup: 1001 + runAsGroup: 1001 + runAsUser: 1001 + fsGroupChangePolicy: OnRootMismatch + containerSecurityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL # generated at http://kubecost.com/install, used for alerts tracking and free trials -kubecostToken: # "" +kubecostToken: # "" # Advanced pipeline for custom prices, enterprise key required pricingCsv: @@ -229,19 +244,19 @@ pricingCsv: location: provider: "AWS" region: "us-east-1" - URI: s3://kc-csv-test/pricing_schema.csv # a valid file URI + URI: s3://kc-csv-test/pricing_schema.csv # a valid file URI csvAccessCredentials: pricing-schema-access-secret # SAML integration for user management and RBAC, enterprise key required # Ref: https://github.com/kubecost/docs/blob/main/user-management.md saml: enabled: false - secretName: "kubecost-authzero" - #metadataSecretName: "kubecost-authzero-metadata" # One of metadataSecretName or idpMetadataURL must be set. 
defaults to metadataURL if set - idpMetadataURL: "https://dev-elu2z98r.auth0.com/samlp/metadata/c6nY4M37rBP0qSO1IYIqBPPyIPxLS8v2" - appRootURL: "http://localhost:9090" # sample URL - authTimeout: 1440 # number of minutes the JWT will be valid - redirectURL: "https://dev-elu2z98r.auth0.com/v2/logout" # callback URL redirected to after logout + # secretName: "kubecost-authzero" + # metadataSecretName: "kubecost-authzero-metadata" # One of metadataSecretName or idpMetadataURL must be set. defaults to metadataURL if set + # idpMetadataURL: "https://dev-elu2z98r.auth0.com/samlp/metadata/c6nY4M37rBP0qSO1IYIqBPPyIPxLS8v2" + # appRootURL: "http://localhost:9090" # sample URL + # authTimeout: 1440 # number of minutes the JWT will be valid + # redirectURL: "https://dev-elu2z98r.auth0.com/v2/logout" # callback URL redirected to after logout # audienceURI: "http://localhost:9090" # by convention, the same as the appRootURL, but any string uniquely identifying kubecost to your samp IDP. Optional if you follow the convention # nameIDFormat: "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" If your SAML provider requires a specific nameid format # isGLUUProvider: false # An additional URL parameter must be appended for GLUU providers @@ -249,56 +264,66 @@ saml: # decryptionKeySecret: "kubecost-sank-decryption-key" # k8s secret where the private key associated with the encryptionCertSecret is stored rbac: enabled: false - groups: - - name: admin - enabled: false # if admin is disabled, all SAML users will be able to make configuration changes to the kubecost frontend - assertionName: "http://schemas.auth0.com/userType" # a SAML Assertion, one of whose elements has a value that matches on of the values in assertionValues - assertionValues: - - "admin" - - "superusers" - - name: readonly - enabled: false # if readonly is disabled, all users authorized on SAML will default to readonly - assertionName: "http://schemas.auth0.com/userType" - assertionValues: - - "readonly" - - name: editor - enabled: true # if editor is enabled, editors will be allowed to edit reports/alerts scoped to them, and act as readers otherwise. Users will never default to editor. - assertionName: "http://schemas.auth0.com/userType" - assertionValues: - - "editor" + # groups: + # - name: admin + # enabled: false # if admin is disabled, all SAML users will be able to make configuration changes to the kubecost frontend + # assertionName: "http://schemas.auth0.com/userType" # a SAML Assertion, one of whose elements has a value that matches on of the values in assertionValues + # assertionValues: + # - "admin" + # - "superusers" + # - name: readonly + # enabled: false # if readonly is disabled, all users authorized on SAML will default to readonly + # assertionName: "http://schemas.auth0.com/userType" + # assertionValues: + # - "readonly" + # - name: editor + # enabled: true # if editor is enabled, editors will be allowed to edit reports/alerts scoped to them, and act as readers otherwise. Users will never default to editor. 
+ # assertionName: "http://schemas.auth0.com/userType" + # assertionValues: + # - "editor" oidc: enabled: false - clientID: "" # application/client client_id paramter obtained from provider, used to make requests to server - clientSecret: "" # application/client client_secret paramter obtained from provider, used to make requests to server - secretName: "kubecost-oidc-secret" # k8s secret where clientsecret will be stored - authURL: "https://my.auth.server/authorize" # endpoint for login to auth server - loginRedirectURL: "http://my.kubecost.url/model/oidc/authorize" # Kubecost url configured in provider for redirect after authentication - discoveryURL: "https://my.auth.server/.well-known/openid-configuration" # url for OIDC endpoint discovery - skipOnlineTokenValidation: false # if true, will skip accessing OIDC introspection endpoint for online token verification, and instead try to locally validate JWT claims -# hostedDomain: "example.com" # optional, blocks access to the auth domain specified in the hd claim of the provider ID token + clientID: "" # application/client client_id parameter obtained from provider, used to make requests to server + clientSecret: "" # application/client client_secret parameter obtained from provider, used to make requests to server + # secretName: "kubecost-oidc-secret" # k8s secret where clientsecret will be stored + # For use to provide a custom OIDC Secret. Overrides the usage of oidc.clientSecret and oidc.secretName. + # Should contain the field directly. + # Can be created using raw k8s secrets, external secrets, sealed secrets, or any other method. + existingCustomSecret: + enabled: false + name: "" # name of the secret containing the client secret + + # authURL: "https://my.auth.server/authorize" # endpoint for login to auth server + # loginRedirectURL: "http://my.kubecost.url/model/oidc/authorize" # Kubecost url configured in provider for redirect after authentication + # discoveryURL: "https://my.auth.server/.well-known/openid-configuration" # url for OIDC endpoint discovery + skipOnlineTokenValidation: false # if true, will skip accessing OIDC introspection endpoint for online token verification, and instead try to locally validate JWT claims + # hostedDomain: "example.com" # optional, blocks access to the auth domain specified in the hd claim of the provider ID token rbac: enabled: false - groups: - - name: admin - enabled: false # if admin is disabled, all authenticated users will be able to make configuration changes to the kubecost frontend - claimName: "roles" # Kubecost matches this string against the JWT's payload key containing RBAC info (this value is unique across identity providers) - claimValues: # Kubecost matches these strings with the roles created in your identity provider - - "admin" - - "superusers" - - name: readonly - enabled: false # if readonly is disabled, all authenticated users will default to readonly - claimName: "roles" - claimValues: - - "readonly" - - name: editor - enabled: false # if editor is enabled, editors will be allowed to edit reports/alerts scoped to them, and act as readers otherwise. Users will never default to editor. 
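
The `oidc.existingCustomSecret` block added above lets a pre-created Secret carry the client secret instead of `oidc.clientSecret`/`oidc.secretName`. A minimal sketch with hypothetical names; the commented `authURL`/`discoveryURL` settings above still apply as usual:

```yaml
oidc:
  enabled: true
  clientID: kubecost                      # hypothetical client ID registered with your IdP
  existingCustomSecret:
    enabled: true
    name: kubecost-oidc-client-secret     # hypothetical Secret containing the client secret field
```
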
- claimName: "roles" - claimValues: - - "editor" + # groups: + # - name: admin + # enabled: false # if admin is disabled, all authenticated users will be able to make configuration changes to the kubecost frontend + # claimName: "roles" # Kubecost matches this string against the JWT's payload key containing RBAC info (this value is unique across identity providers) + # claimValues: # Kubecost matches these strings with the roles created in your identity provider + # - "admin" + # - "superusers" + # - name: readonly + # enabled: false # if readonly is disabled, all authenticated users will default to readonly + # claimName: "roles" + # claimValues: + # - "readonly" + # - name: editor + # enabled: false # if editor is enabled, editors will be allowed to edit reports/alerts scoped to them, and act as readers otherwise. Users will never default to editor. + # claimName: "roles" + # claimValues: + # - "editor" -# Adds an httpProxy as an environment variable. systemProxy.enabled must be `true`to have any effect. -# Ref: https://www.oreilly.com/library/view/security-with-go/9781788627917/5ea6a02b-3d96-44b1-ad3c-6ab60fcbbe4f.xhtml +## Adds the HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables to all +## containers. Typically used in environments that have firewall rules which +## prevent kubecost from accessing cloud provider resources. +## Ref: https://www.oreilly.com/library/view/security-with-go/9781788627917/5ea6a02b-3d96-44b1-ad3c-6ab60fcbbe4f.xhtml +## systemProxy: enabled: false httpProxyUrl: "" @@ -309,6 +334,7 @@ systemProxy: # - name: "image-pull-secret" kubecostFrontend: + enabled: true image: "gcr.io/kubecost1/frontend" imagePullPolicy: Always # extraEnv: @@ -329,13 +355,14 @@ kubecostFrontend: periodSeconds: 10 failureThreshold: 200 ipv6: - enabled: true # disable if the cluster does not support ipv6 + enabled: true # disable if the cluster does not support ipv6 # allow customizing nginx-conf server block # extraServerConfig: |- # proxy_busy_buffers_size 512k; # proxy_buffers 4 512k; # proxy_buffer_size 256k; # large_client_header_buffers 4 64k; + # hideDiagnostics: false # used if the primary is not monitored. Supported in limited environments. # api: # fqdn: kubecost-api.kubecost.svc.cluster.local:9001 @@ -380,7 +407,7 @@ kubecostMetrics: annotations: {} # Service Monitor for Kubecost Metrics - serviceMonitor: # the kubecost included prometheus uses scrapeConfigs and does not support service monitors. The following options assume an existing prometheus that supports serviceMonitors. + serviceMonitor: # the kubecost included prometheus uses scrapeConfigs and does not support service monitors. The following options assume an existing prometheus that supports serviceMonitors. enabled: false additionalLabels: {} metricRelabelings: [] @@ -397,10 +424,10 @@ sigV4Proxy: imagePullPolicy: Always name: aps port: 8005 - region: us-west-2 # The AWS region - host: aps-workspaces.us-west-2.amazonaws.com # The hostname for AMP service. + region: us-west-2 # The AWS region + host: aps-workspaces.us-west-2.amazonaws.com # The hostname for AMP service. # role_arn: arn:aws:iam:::role/role-name # The AWS IAM role to assume. 
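
The reworded comment above clarifies that `systemProxy` injects HTTP_PROXY, HTTPS_PROXY, and NO_PROXY into the containers for firewalled environments. A minimal sketch using only the keys visible in this hunk; the proxy URL is a placeholder, and any further proxy keys the chart exposes sit outside this hunk:

```yaml
systemProxy:
  enabled: true                               # required for the proxy URL below to take effect
  httpProxyUrl: http://proxy.internal:3128    # placeholder egress proxy for cloud-provider API calls
```
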
- extraEnv: # Pass extra env variables to sigV4Proxy + extraEnv: # Pass extra env variables to sigV4Proxy # - name: AWS_ACCESS_KEY_ID # value: # - name: AWS_SECRET_ACCESS_KEY @@ -409,6 +436,8 @@ sigV4Proxy: kubecostModel: image: "gcr.io/kubecost1/cost-model" imagePullPolicy: Always + # set to 'true' to utilize images on the public Quay repository + # openSourceOnly: false # extraEnv: # - name: SOME_VARIABLE # value: "some_value" @@ -475,9 +504,9 @@ kubecostModel: requests: cpu: "200m" memory: "55Mi" - #limits: - # cpu: "800m" - # memory: "256Mi" + # limits: + # cpu: "800m" + # memory: "256Mi" livenessProbe: enabled: false initialDelaySeconds: 30 @@ -485,6 +514,35 @@ kubecostModel: failureThreshold: 200 extraArgs: [] + # creates an ingress directly to the model container, for API access + ingress: + enabled: false + # className: nginx + labels: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + paths: ["/"] + pathType: ImplementationSpecific + hosts: + - cost-analyzer-model.local + tls: [] + # - secretName: cost-analyzer-model-tls + # hosts: + # - cost-analyzer-model.local + +# etlUtils is a utility currently used by Kubecost internal support to implement specific functionality related to Thanos conversion. +etlUtils: + enabled: false + fullImageName: null + resources: {} + env: {} + nodeSelector: {} + tolerations: {} + affinity: {} + # Basic Kubecost ingress, more examples available at https://github.com/kubecost/docs/blob/main/ingress-examples.md ingress: enabled: false @@ -495,7 +553,7 @@ ingress: annotations: # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" - paths: ["/"] # There's no need to route specifically to the pods-- we have an nginx deployed that handles routing + paths: ["/"] # There's no need to route specifically to the pods-- we have an nginx deployed that handles routing pathType: ImplementationSpecific hosts: - cost-analyzer.local @@ -517,21 +575,21 @@ affinity: {} # If true, creates a PriorityClass to be used by the cost-analyzer pod priority: enabled: false - name: "" # Provide name of existing priority class only. If left blank, upstream chart will create one from default template. + name: "" # Provide name of existing priority class only. If left blank, upstream chart will create one from default template. # value: 1000000 # If true, enable creation of NetworkPolicy resources. 
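
The new `kubecostModel.ingress` values above pair with the model-ingress template added earlier in this diff, which exposes the cost-model API on the service's `tcp-model` port and selects the Ingress API version from cluster capabilities. A hedged enablement sketch with placeholder host, class, and TLS secret:

```yaml
kubecostModel:
  ingress:
    enabled: true
    className: nginx                     # placeholder; assumes an ingress controller with this class exists
    paths: ["/"]
    pathType: ImplementationSpecific
    hosts:
      - cost-model.example.com           # placeholder hostname
    tls:
      - secretName: cost-model-tls       # placeholder pre-created TLS secret
        hosts:
          - cost-model.example.com
```
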
networkPolicy: enabled: false - denyEgress: true # create a network policy that denies egress from kubecost - sameNamespace: true # Set to true if cost analyser and prometheus are on the same namespace + denyEgress: true # create a network policy that denies egress from kubecost + sameNamespace: true # Set to true if cost analyzer and prometheus are on the same namespace # namespace: kubecost # Namespace where prometheus is installed # Cost-analyzer specific vars using the new template costAnalyzer: - enabled: false # If true, create a newtork policy for cost-analzyer - annotations: {} # annotations to be added to the network policy - additionalLabels: {} # additional labels to be added to the network policy + enabled: false # If true, create a network policy for cost-analyzer + annotations: {} # annotations to be added to the network policy + additionalLabels: {} # additional labels to be added to the network policy # Examples rules: # ingressRules: # - selectors: # allow ingress from self on all ports @@ -566,7 +624,7 @@ extraVolumeMounts: [] persistentVolume: size: 32Gi dbSize: 32.0Gi - enabled: true # Note that setting this to false means configurations will be wiped out on pod restart. + enabled: true # Note that setting this to false means configurations will be wiped out on pod restart. # storageClass: "-" # # existingClaim: kubecost-cost-analyzer # a claim in the same namespace as kubecost labels: {} @@ -587,14 +645,14 @@ remoteWrite: initImage: "gcr.io/kubecost1/sql-init" initImagePullPolicy: Always installLocal: true - remotePostgresAddress: "" # ignored if installing locally + remotePostgresAddress: "" # ignored if installing locally ## PriorityClassName ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass priorityClassName: "" persistentVolume: size: 200Gi auth: - password: admin # change me + password: admin # change me prometheus: podSecurityPolicy: @@ -616,9 +674,12 @@ prometheus: - role: pod relabel_configs: # Scrape only the the targets matching the following metadata - - source_labels: [__meta_kubernetes_pod_label_app] + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] action: keep - regex: {{ template "cost-analyzer.networkCostsName" . }} + regex: kubecost + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name] + action: keep + regex: network-costs server: # If clusterIDConfigmap is defined, instead use user-generated configmap with key CLUSTER_ID # to use as unique cluster ID in kubecost cost-analyzer deployment. 
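
The relabeling change above now selects pods by their `app.kubernetes.io/instance` and `app.kubernetes.io/name` labels, matching the default labels given to the network-costs pods later in this values diff. When an operator-managed Prometheus scrapes instead of the bundled one, the pod monitor can be used; a sketch with a placeholder selector label:

```yaml
networkCosts:
  enabled: true
  podMonitor:
    enabled: true                      # requires the Prometheus Operator PodMonitor CRD
    additionalLabels:
      release: kube-prometheus-stack   # placeholder label your Prometheus selects PodMonitors by
```
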
@@ -638,7 +699,7 @@ prometheus: scrape_timeout: 60s evaluation_interval: 1m external_labels: - cluster_id: cluster-one # Each cluster should have a unique ID + cluster_id: cluster-one # Each cluster should have a unique ID persistentVolume: size: 32Gi enabled: true @@ -650,6 +711,8 @@ prometheus: # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # retention: 50h This must be greater than or equal to etlHourlyStoreDurationHours + # retentionSize: should be significantly greater than the storage used in the number of hours set in etlHourlyStoreDurationHours alertmanager: enabled: false persistentVolume: @@ -657,11 +720,14 @@ prometheus: # node-export must be disabled if there is an existing daemonset: https://guide.kubecost.com/hc/en-us/articles/4407601830679-Troubleshoot-Install#a-name-node-exporter-a-issue-failedscheduling-kubecost-prometheus-node-exporter nodeExporter: enabled: true - # kubecost emits pre-2.0 KSM metrics, KSM is enabled by default here for backwards compatibity, but can be disabled to save resources without concern to kubecost metrics + + ## Default disabled since Kubecost already emits KSMv1 metrics. + ## Ref: https://docs.kubecost.com/architecture/ksm-metrics kubeStateMetrics: - enabled: true + enabled: false kube-state-metrics: - disabled: false + disabled: true + pushgateway: enabled: false persistentVolume: @@ -676,7 +742,7 @@ prometheus: # action: keep # queue_config: # max_samples_per_send: 1000 - #remote_read: + # remote_read: # - url: "http://pgprometheus-adapter:9201/read" rules: groups: @@ -717,7 +783,7 @@ networkCosts: enabled: false podSecurityPolicy: enabled: false - image: gcr.io/kubecost1/kubecost-network-costs:v0.16.8 + image: gcr.io/kubecost1/kubecost-network-costs:v0.17.0 imagePullPolicy: Always updateStrategy: type: RollingUpdate @@ -735,8 +801,8 @@ networkCosts: port: 3001 # this daemonset can use significant resources on large clusters: https://guide.kubecost.com/hc/en-us/articles/4407595973527-Network-Traffic-Cost-Allocation resources: - limits: # remove the limits by setting cpu: null - cpu: 500m # can be less, will depend on cluster size + limits: # remove the limits by setting cpu: null + cpu: 500m # can be less, will depend on cluster size # memory: it is not recommended to set a memory limit requests: cpu: 50m @@ -755,7 +821,7 @@ networkCosts: # IPv4 Link Local Address Space - "169.254.0.0/16" # Private Address Ranges in RFC-1918 - - "10.0.0.0/8" # Remove this entry if using Multi-AZ Kubernetes + - "10.0.0.0/8" # Remove this entry if using Multi-AZ Kubernetes - "172.16.0.0/12" - "192.168.0.0/16" @@ -797,7 +863,7 @@ networkCosts: azure-cloud-services: false # user defined services provide a way to define custom service endpoints which will label traffic metrics # falling within the defined address range. 
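
With the change above, the bundled kube-state-metrics is now disabled by default because Kubecost already emits the KSMv1 metrics it needs. A hedged sketch for turning it back on if something else in the cluster still consumes the bundled KSM; both toggles move together, as in the new defaults:

```yaml
prometheus:
  kubeStateMetrics:
    enabled: true
  kube-state-metrics:
    disabled: false
```
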
- #services: + # services: # - service: "test-service-1" # ips: # - "19.1.1.2" @@ -829,7 +895,10 @@ networkCosts: podMonitor: enabled: false additionalLabels: {} - additionalLabels: {} + # match the default extraScrapeConfig + additionalLabels: + app.kubernetes.io/instance: kubecost + app.kubernetes.io/name: network-costs nodeSelector: {} annotations: {} healthCheckProbes: {} @@ -868,15 +937,84 @@ kubecostDeployment: ## queryServiceReplicas: 0 queryService: + securityContext: + runAsGroup: 1001 + runAsUser: 1001 + fsGroup: 1001 + fsGroupChangePolicy: OnRootMismatch + runAsNonRoot: false + seccompProfile: + type: RuntimeDefault + containerSecurityContext: + allowPrivilegeEscalation: true + readOnlyRootFilesystem: false + capabilities: + drop: + - ALL resources: requests: - # You can use the Kubecost savings report for 'Right-size your container requests' to determine the recommended resource requests once the pod has run for 24 hours. + ## You can use the Kubecost savings report for 'Right-size your + ## container requests' to determine the recommended resource requests + ## once the pod has run for 24 hours. cpu: 1000m memory: 500Mi ## default storage class storageClass: "" databaseVolumeSize: 100Gi configVolumeSize: 1Gi + initImage: {} + +## The Kubecost Aggregator is a high scale implementation of Kubecost intended +## for large datasets and/or high query load. At present, this should only be +## enabled when recommended by Kubecost staff. +## +kubecostAggregator: + enabled: false + replicas: 1 + ## Creates a new pod to retrieve CloudCost data. By default it uses the same + ## serviceaccount as the cost-analyzer pod. A custom serviceaccount can be + ## specified. + cloudCost: + enabled: false + # serviceAccountName: + jaeger: + enabled: false + image: jaegertracing/all-in-one + imageVersion: latest + # containerSecurityContext: + # fullImageName: + resources: {} + env: + "LOG_LEVEL": "info" + persistentConfigsStorage: + # default storage class + storageClass: "" + storageRequest: 1Gi + aggregatorStorage: + # default storage class + storageClass: "" + storageRequest: 20Gi + aggregatorDbStorage: + # default storage class + storageClass: "" + storageRequest: 128Gi + # securityContext: + # runAsGroup: 1001 + # runAsUser: 1001 + # fsGroup: 1001 + # fsGroupChangePolicy: OnRootMismatch + # seccompProfile: + # type: RuntimeDefault + # runAsNonRoot: true + # containerSecurityContext: + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL # Kubecost Cluster Controller for Right Sizing and Cluster Turndown clusterController: @@ -909,7 +1047,7 @@ reporting: # googleAnalyticsTag is only included in our Enterprise offering. # googleAnalyticsTag: G-XXXXXXXXX -serviceMonitor: # the kubecost included prometheus uses scrapeConfigs and does not support service monitors. The following options assume an existing prometheus that supports serviceMonitors. +serviceMonitor: # the kubecost included prometheus uses scrapeConfigs and does not support service monitors. The following options assume an existing prometheus that supports serviceMonitors. enabled: false additionalLabels: {} metricRelabelings: [] @@ -927,12 +1065,12 @@ prometheusRule: supportNFS: false # initChownDataImage ensures all Kubecost filepath permissions on PV or local storage are set up correctly. -initChownDataImage: "busybox" # Supports a fully qualified Docker image, e.g. 
registry.hub.docker.com/library/busybox:latest +initChownDataImage: "busybox" # Supports a fully qualified Docker image, e.g. registry.hub.docker.com/library/busybox:latest initChownData: resources: {} - #requests: - # cpu: "50m" - # memory: "20Mi" + # requests: + # cpu: "50m" + # memory: "20Mi" grafana: # namespace_datasources: kubecost # override the default namespace here @@ -973,7 +1111,7 @@ grafana: serve_from_sub_path: true root_url: "%(protocol)s://%(domain)s:%(http_port)s/grafana" serviceAccount: - create: true # Set this to false if you're bringing your own service account. + create: true # Set this to false if you're bringing your own service account. annotations: {} # name: kc-test awsstore: @@ -983,24 +1121,63 @@ awsstore: ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass priorityClassName: "" +## Federated ETL Architecture +## Ref: https://docs.kubecost.com/install-and-configure/install/multi-cluster/federated-etl +## federatedETL: - federatedCluster: false # whether this cluster should push data to the Federated store - primaryCluster: false # whether this cluster should load data from the combined section of the Federated store - useExistingS3Config: false # will attempt to use existing object-store.yaml configs for S3 backup/Thanos as config for the Federated store - redirectS3Backup: false # changes the dir of S3 backup to the Federated combined store, for using Thanos-federated data in the Federated ETL. Note S3 backup should be enabled separately for this. - useMultiClusterDB: false # set to true if you have a single federated PromQL DB with metrics from all monitored clusters but want to use federation for performance + ## If true, push ETL data to the federated storage bucket + federatedCluster: false + + ## If true, load ETL data from the combined storage bucket to display data + ## from all monitored clusters. Note, if this is your first time setting up + ## Federated ETL, ensure you see federated ETL data in combined storage before + ## setting this config to true. + primaryCluster: false + + ## If true, changes the dir of S3 backup to the Federated combined store. + ## Commonly used when transitioning from Thanos to Federated ETL architecture. + redirectS3Backup: false + + ## If true, will query metrics from a central PromQL DB (e.g. Amazon Managed + ## Prometheus) + useMultiClusterDB: false + + ## The Federator is responsible for combining each cluster's ETL files located + ## in the federated storage bucket, and placing results in the combined + ## storage bucket. federator: - enabled: false # enables the federator to run inside the costmodel container, federating the data in the Federated store - clusters: [] # optional. Whitelist of clusters by cluster id. If not set, the federator will attempt to federated all clusters pushing to the federated storage. - # primaryClusterID: "cluster_id" # optional. Used when reconciliation is expected to occur on the Primary. - # federationCutoffDate: "2022-10-18T00:00:00.000Z" # an RFC 3339-formatted string. All ETL files with windows that fall before this time are not processed by the Federator. If this is not set, the Federator will process all files regardless of date. - resources: {} # you can use the Kubecost savings report for 'Right-size your container requests' to determine the recommended resource requests once the pod has run for 24 hours. + enabled: false + + ## Optional. Used when reconciliation is expected to occur on the Primary. 
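
The restructured comments above spell out the Federated ETL roles: member clusters push ETL files, one primary reads the combined data, and the federator is typically enabled in one place. A minimal per-cluster sketch, assuming the object-store secret for the federated bucket is configured separately per the linked documentation:

```yaml
federatedETL:
  federatedCluster: true    # this cluster pushes its ETL files to the federated storage bucket
  primaryCluster: false     # set true only on the cluster that should read the combined data
  federator:
    enabled: false          # enable on a single cluster, typically alongside the primary
    clusters: []            # optional allowlist of cluster IDs to federate
```
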
+ # primaryClusterID: "cluster_id" + + ## Optional. Allowlist of which cluster_ids to federate. If not set, the + ## federator will attempt to federated all clusters pushing to the federated + ## storage. + clusters: [] + + ## Optional. An RFC 3339-formatted string. All ETL files with windows that + ## fall before this time are not processed by the Federator. If this is not + ## set, the Federator will process all files regardless of date. + # federationCutoffDate: "2022-10-18T00:00:00.000Z" + + ## Optional. You can use the Kubecost savings report for 'Right-size your + ## container requests' to determine the recommended resource requests once + ## the pod has run for 24 hours. + resources: {} # requests: # cpu: 100m # memory: 500Mi +## Kubecost Admission Controller (beta feature) +## To use this feature, ensure you have run the `create-admission-controller.sh` +## script. This generates a k8s secret with TLS keys/certificats and a +## corresponding CA bundle. +## kubecostAdmissionController: enabled: false + secretName: webhook-server-tls + caBundle: ${CA_BUNDLE} # Enables or disables the Cost Event Audit pipeline, which tracks recent changes at cluster level # and provides an estimated cost impact via the Kubecost Predict API. @@ -1009,13 +1186,14 @@ kubecostAdmissionController: costEventsAudit: enabled: false - -#readonly: false # disable updates to kubecost from the frontend UI and via POST request +## Disable updates to kubecost from the frontend UI and via POST request +## +# readonly: false # These configs can also be set from the Settings page in the Kubecost product UI # Values in this block override config changes in the Settings UI on pod restart # -#kubecostProductConfigs: +# kubecostProductConfigs: # An optional list of cluster definitions that can be added for frontend access. The local # cluster is *always* included by default, so this list is for non-local clusters. # Ref: https://github.com/kubecost/docs/blob/main/multi-cluster.md @@ -1068,6 +1246,7 @@ costEventsAudit: # masterPayerARN: "" # projectID: "123456789" # Also known as AccountID on AWS -- the current account/project that this instance of Kubecost is deployed on. # gcpSecretName: gcp-secret # Name of a secret representing the gcp service key +# gcpSecretKeyName: compute-viewer-kubecost-key.json # Name of the secret's key containing the gcp service key # bigQueryBillingDataDataset: billing_data.gcp_billing_export_v1_01AC9F_74CF1D_5565A2 # labelMappingConfigs: # names of k8s labels or annotations used to designate different allocation concepts # enabled: true @@ -1092,7 +1271,7 @@ costEventsAudit: # grafanaURL: "" # clusterName: "" # clusterName is the default context name in settings. # clusterAccountID: "" # Manually set Account property for assets -# currencyCode: "USD" # official support for USD, AUD, BRL, CAD, CHF, CNY, DKK, EUR, GBP, INR, JPY, NOK, PLN, SEK +# currencyCode: "USD" # official support for USD, AUD, BRL, CAD, CHF, CNY, DKK, EUR, GBP, IDR, INR, JPY, NOK, PLN, SEK # azureBillingRegion: US # Represents 2-letter region code, e.g. West Europe = NL, Canada = CA. ref: https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes # azureSubscriptionID: 0bd50fdf-c923-4e1e-850c-196dd3dcc5d3 # azureClientID: f2ef6f7d-71fb-47c8-b766-8d63a19db017 @@ -1118,11 +1297,6 @@ costEventsAudit: # ingestPodUID: false # Enables using UIDs to uniquely ID pods. This requires either Kubecost's replicated KSM metrics, or KSM v2.1.0+. This may impact performance, and changes the default cost-model allocation behavior. 
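
The new `gcpSecretKeyName` override above sits alongside the existing GCP keys in the (commented) `kubecostProductConfigs` block. A hedged sketch of the GCP billing wiring with placeholder project and dataset values; the Secret is assumed to exist already:

```yaml
kubecostProductConfigs:
  projectID: "123456789"                               # placeholder GCP project number
  gcpSecretName: gcp-secret                            # existing Secret holding the service-account key
  gcpSecretKeyName: compute-viewer-kubecost-key.json   # key inside that Secret (the new override)
  bigQueryBillingDataDataset: billing_data.gcp_billing_export_v1_XXXXXX_XXXXXX_XXXXXX   # placeholder dataset
```
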
# regionOverrides: "region1,region2,region3" # list of regions which will override default costmodel provider regions -#kubecostAdmissionController: -# enabled: true -# secretName: webhook-server-tls -# caBundle: ${CA_BUNDLE} - # -- Array of extra K8s manifests to deploy ## Note: Supports use of custom Helm templates extraObjects: [] @@ -1151,5 +1325,3 @@ extraObjects: [] # host: kubecost.kubecost.svc.cluster.local # port: # number: 80 - - diff --git a/charts/nats/nats/Chart.yaml b/charts/nats/nats/Chart.yaml index 2ca8a0e39..acf50861a 100644 --- a/charts/nats/nats/Chart.yaml +++ b/charts/nats/nats/Chart.yaml @@ -18,4 +18,4 @@ maintainers: name: The NATS Authors url: https://github.com/nats-io name: nats -version: 1.1.3 +version: 1.1.4 diff --git a/charts/nats/nats/files/nats-box/deployment/pod-template.yaml b/charts/nats/nats/files/nats-box/deployment/pod-template.yaml index 47e6fbbbe..71056bfb6 100644 --- a/charts/nats/nats/files/nats-box/deployment/pod-template.yaml +++ b/charts/nats/nats/files/nats-box/deployment/pod-template.yaml @@ -9,6 +9,13 @@ spec: # service discovery uses DNS; don't need service env vars enableServiceLinks: false + + {{- with .Values.global.image.pullSecretNames }} + imagePullSecrets: + {{- range . }} + - name: {{ . | quote }} + {{- end }} + {{- end }} {{- with .Values.natsBox.serviceAccount }} {{- if .enabled }} diff --git a/charts/nats/nats/files/stateful-set/pod-template.yaml b/charts/nats/nats/files/stateful-set/pod-template.yaml index 9aa84f0a5..bb1d8d7be 100644 --- a/charts/nats/nats/files/stateful-set/pod-template.yaml +++ b/charts/nats/nats/files/stateful-set/pod-template.yaml @@ -28,6 +28,13 @@ spec: # service discovery uses DNS; don't need service env vars enableServiceLinks: false + + {{- with .Values.global.image.pullSecretNames }} + imagePullSecrets: + {{- range . }} + - name: {{ . 
| quote }} + {{- end }} + {{- end }} {{- with .Values.serviceAccount }} {{- if .enabled }} diff --git a/charts/nats/nats/values.yaml b/charts/nats/nats/values.yaml index 882cf4343..7687dfbf9 100644 --- a/charts/nats/nats/values.yaml +++ b/charts/nats/nats/values.yaml @@ -6,6 +6,10 @@ global: # global image pull policy to use for all container images in the chart # can be overridden by individual image pullPolicy pullPolicy: + # global list of secret names to use as image pull secrets for all pod specs in the chart + # secrets must exist in the same namespace + # https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + pullSecretNames: [] # global registry to use for all container images in the chart # can be overridden by individual image registry registry: diff --git a/charts/redpanda/redpanda/Chart.lock b/charts/redpanda/redpanda/Chart.lock index beaf12d04..aad55ce6c 100644 --- a/charts/redpanda/redpanda/Chart.lock +++ b/charts/redpanda/redpanda/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: https://charts.redpanda.com version: 0.1.7 digest: sha256:2be209fa1660b3c8a030bb35e9e7fa25dcb81aa456ce7a73c2ab1ae6eebb3d04 -generated: "2023-10-30T17:31:44.018230015Z" +generated: "2023-10-31T17:32:18.636285339Z" diff --git a/charts/redpanda/redpanda/Chart.yaml b/charts/redpanda/redpanda/Chart.yaml index d88858d56..68c12dbb1 100644 --- a/charts/redpanda/redpanda/Chart.yaml +++ b/charts/redpanda/redpanda/Chart.yaml @@ -1,7 +1,7 @@ annotations: artifacthub.io/images: | - name: redpanda - image: docker.redpanda.com/redpandadata/redpanda:v23.2.13 + image: docker.redpanda.com/redpandadata/redpanda:v23.2.14 - name: busybox image: busybox:latest - name: mintel/docker-alpine-bash-curl-jq @@ -17,7 +17,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.21-0' catalog.cattle.io/release-name: redpanda apiVersion: v2 -appVersion: v23.2.13 +appVersion: v23.2.14 dependencies: - condition: console.enabled name: console @@ -37,4 +37,4 @@ name: redpanda sources: - https://github.com/redpanda-data/helm-charts type: application -version: 5.6.37 +version: 5.6.38 diff --git a/charts/speedscale/speedscale-operator/Chart.yaml b/charts/speedscale/speedscale-operator/Chart.yaml index 7e8fdc491..9ae42a8bf 100644 --- a/charts/speedscale/speedscale-operator/Chart.yaml +++ b/charts/speedscale/speedscale-operator/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.17.0-0' catalog.cattle.io/release-name: speedscale-operator apiVersion: v1 -appVersion: 1.3.541 +appVersion: 1.4.5 description: Stress test your APIs with real world scenarios. Collect and replay traffic without scripting. home: https://speedscale.com @@ -24,4 +24,4 @@ maintainers: - email: support@speedscale.com name: Speedscale Support name: speedscale-operator -version: 1.3.44 +version: 1.4.0 diff --git a/charts/speedscale/speedscale-operator/README.md b/charts/speedscale/speedscale-operator/README.md index 3ef427a3d..39c05b30a 100644 --- a/charts/speedscale/speedscale-operator/README.md +++ b/charts/speedscale/speedscale-operator/README.md @@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. 
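
The NATS changes above thread a new `global.image.pullSecretNames` list into both the nats-box and stateful-set pod templates as `imagePullSecrets`. A sketch with a placeholder secret, which must already exist in the release namespace:

```yaml
global:
  image:
    pullSecretNames:
      - my-registry-credentials   # placeholder docker-registry Secret in the same namespace
```
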
-### Upgrade to 1.3.44 +### Upgrade to 1.3.47 ```bash -kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.44/templates/crds/trafficreplays.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.47/templates/crds/trafficreplays.yaml ``` ### Upgrade to 1.1.0 diff --git a/charts/speedscale/speedscale-operator/app-readme.md b/charts/speedscale/speedscale-operator/app-readme.md index 3ef427a3d..39c05b30a 100644 --- a/charts/speedscale/speedscale-operator/app-readme.md +++ b/charts/speedscale/speedscale-operator/app-readme.md @@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. -### Upgrade to 1.3.44 +### Upgrade to 1.3.47 ```bash -kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.44/templates/crds/trafficreplays.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.47/templates/crds/trafficreplays.yaml ``` ### Upgrade to 1.1.0 diff --git a/charts/speedscale/speedscale-operator/templates/admission.yaml b/charts/speedscale/speedscale-operator/templates/admission.yaml index 4242c8f22..fc260fecb 100644 --- a/charts/speedscale/speedscale-operator/templates/admission.yaml +++ b/charts/speedscale/speedscale-operator/templates/admission.yaml @@ -48,6 +48,7 @@ webhooks: {{- else }} namespaceSelector: {} {{- end }} + reinvocationPolicy: IfNeeded rules: - apiGroups: - apps @@ -64,6 +65,16 @@ webhooks: - daemonsets - jobs - replicasets + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - pods sideEffects: None timeoutSeconds: 10 --- diff --git a/charts/speedscale/speedscale-operator/templates/configmap.yaml b/charts/speedscale/speedscale-operator/templates/configmap.yaml index 395201b96..e34fee4ad 100644 --- a/charts/speedscale/speedscale-operator/templates/configmap.yaml +++ b/charts/speedscale/speedscale-operator/templates/configmap.yaml @@ -24,3 +24,5 @@ data: WITH_INSPECTOR: {{ .Values.dashboardAccess | quote }} API_KEY_SECRET_NAME: {{ .Values.apiKeySecret | quote }} DEPLOY_DEMO: {{ .Values.deployDemo }} + GLOBAL_ANNOTATIONS: {{ .Values.globalAnnotations | toJson | quote }} + GLOBAL_LABELS: {{ .Values.globalLabels | toJson | quote }} diff --git a/charts/speedscale/speedscale-operator/templates/deployments.yaml b/charts/speedscale/speedscale-operator/templates/deployments.yaml index 2bcff0af9..26d7421bf 100644 --- a/charts/speedscale/speedscale-operator/templates/deployments.yaml +++ b/charts/speedscale/speedscale-operator/templates/deployments.yaml @@ -10,6 +10,9 @@ metadata: labels: app: speedscale-operator controlplane.speedscale.com/component: operator + {{- if .Values.globalLabels }} +{{ toYaml .Values.globalLabels | indent 4}} + {{- end }} name: speedscale-operator namespace: {{ .Release.Namespace }} spec: @@ -23,11 +26,17 @@ spec: template: metadata: annotations: + {{- if .Values.globalAnnotations }} +{{ toYaml .Values.globalAnnotations | indent 8}} + {{- end }} checksum/config: | {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} labels: app: speedscale-operator controlplane.speedscale.com/component: operator + {{- if .Values.globalLabels }} +{{ toYaml .Values.globalLabels | indent 8}} + {{- end }} spec: containers: - command: diff --git a/charts/speedscale/speedscale-operator/templates/hooks.yaml b/charts/speedscale/speedscale-operator/templates/hooks.yaml index 120f407da..24e733f3e 100644 --- a/charts/speedscale/speedscale-operator/templates/hooks.yaml +++ b/charts/speedscale/speedscale-operator/templates/hooks.yaml @@ -12,11 +12,23 @@ metadata: creationTimestamp: null name: speedscale-operator-pre-install namespace: {{ .Release.Namespace }} + labels: + {{- if .Values.globalLabels }} +{{ toYaml .Values.globalLabels | indent 4}} + {{- end }} spec: backoffLimit: 0 template: metadata: + annotations: + {{- if .Values.globalAnnotations }} +{{ toYaml .Values.globalAnnotations | indent 8}} + {{- end }} creationTimestamp: null + labels: + {{- if .Values.globalLabels }} +{{ toYaml .Values.globalLabels | indent 8}} + {{- end }} spec: containers: - args: diff --git a/charts/speedscale/speedscale-operator/templates/tls.yaml b/charts/speedscale/speedscale-operator/templates/tls.yaml index 472112b70..c9a17f296 100644 --- a/charts/speedscale/speedscale-operator/templates/tls.yaml +++ b/charts/speedscale/speedscale-operator/templates/tls.yaml @@ -23,11 +23,23 @@ metadata: creationTimestamp: null name: speedscale-operator-create-jks namespace: {{ .Release.Namespace }} + labels: + {{- if .Values.globalLabels }} +{{ toYaml .Values.globalLabels | indent 4}} + {{- end }} spec: backoffLimit: 0 template: metadata: + annotations: + {{- if .Values.globalAnnotations }} +{{ toYaml .Values.globalAnnotations | indent 8}} + {{- end }} creationTimestamp: null + labels: + {{- if .Values.globalAnnotations }} +{{ toYaml .Values.globalAnnotations | indent 8}} + {{- end }} spec: containers: - args: diff --git a/charts/speedscale/speedscale-operator/values.yaml b/charts/speedscale/speedscale-operator/values.yaml index 71b3f431a..86f11a359 100644 --- a/charts/speedscale/speedscale-operator/values.yaml +++ b/charts/speedscale/speedscale-operator/values.yaml @@ -20,7 +20,7 @@ clusterName: "my-cluster" # Speedscale components image settings. image: registry: gcr.io/speedscale - tag: v1.3.541 + tag: v1.4.5 pullPolicy: Always # Log level for Speedscale components. @@ -49,11 +49,22 @@ dlp: # https://docs.tigera.io/calico/3.25/getting-started/kubernetes/managed-public-cloud/eks#install-eks-with-calico-networking hostNetwork: false +# A set of annotations to be applied to all Speedscale related deployments, +# services, jobs, pods, etc. +# # Example: # annotation.first: value # annotation.second: value globalAnnotations: {} +# A set of labels to be applied to all Speedscale related deployments, +# services, jobs, pods, etc. +# +# Example: +# label1: value +# label2: value +globalLabels: {} + # A full affinity object as detailed: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity affinity: {} diff --git a/charts/sysdig/sysdig/CHANGELOG.md b/charts/sysdig/sysdig/CHANGELOG.md index 3c6d7ee4e..2a6596d01 100644 --- a/charts/sysdig/sysdig/CHANGELOG.md +++ b/charts/sysdig/sysdig/CHANGELOG.md @@ -10,6 +10,9 @@ Manual edits are supported only below '## Change Log' and should be used exclusively to fix incorrect entries and not to add new ones. 
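
The Speedscale templates above now stamp `globalAnnotations` and `globalLabels` onto the operator deployment, hook jobs, and their pods, and pass them to the operator via the configmap. A sketch with placeholder metadata:

```yaml
globalAnnotations:
  example.com/owner: platform-team   # placeholder annotation applied to Speedscale-related resources
globalLabels:
  environment: staging               # placeholder label applied to Speedscale-related resources
```
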
## Change Log +# v1.16.20 +### Bug Fixes +* **agent,kspm-collector,node-analyzer,sysdig** [f378d192](https://github.com/sysdiglabs/charts/commit/f378d1922d0f21f5936c6ed872e6538536ccf1f2): whitespace errors ([#1436](https://github.com/sysdiglabs/charts/issues/1436)) # v1.16.19 ### New Features * [629c5f06](https://github.com/sysdiglabs/charts/commit/629c5f06ccab168b9460ec1fa6564aed1a54c015): release agent 12.17.1 ([#1430](https://github.com/sysdiglabs/charts/issues/1430)) diff --git a/charts/sysdig/sysdig/Chart.yaml b/charts/sysdig/sysdig/Chart.yaml index bff65d0d4..0ae35a048 100644 --- a/charts/sysdig/sysdig/Chart.yaml +++ b/charts/sysdig/sysdig/Chart.yaml @@ -19,4 +19,4 @@ name: sysdig sources: - https://app.sysdigcloud.com/#/settings/user - https://github.com/draios/sysdig -version: 1.16.19 +version: 1.16.20 diff --git a/charts/sysdig/sysdig/RELEASE-NOTES.md b/charts/sysdig/sysdig/RELEASE-NOTES.md index 8727787d1..03ac54ef1 100644 --- a/charts/sysdig/sysdig/RELEASE-NOTES.md +++ b/charts/sysdig/sysdig/RELEASE-NOTES.md @@ -1,5 +1,5 @@ # What's Changed -### New Features -- [629c5f06](https://github.com/sysdiglabs/charts/commit/629c5f06ccab168b9460ec1fa6564aed1a54c015): release agent 12.17.1 ([#1430](https://github.com/sysdiglabs/charts/issues/1430)) -#### Full diff: https://github.com/sysdiglabs/charts/compare/sysdig-deploy-1.26.6...sysdig-1.16.19 +### Bug Fixes +- **agent,kspm-collector,node-analyzer,sysdig** [f378d192](https://github.com/sysdiglabs/charts/commit/f378d1922d0f21f5936c6ed872e6538536ccf1f2): whitespace errors ([#1436](https://github.com/sysdiglabs/charts/issues/1436)) +#### Full diff: https://github.com/sysdiglabs/charts/compare/sysdig-deploy-1.29.1...sysdig-1.16.20 diff --git a/charts/sysdig/sysdig/templates/secrets.yaml b/charts/sysdig/sysdig/templates/secrets.yaml index bce603a40..ecf75beb8 100644 --- a/charts/sysdig/sysdig/templates/secrets.yaml +++ b/charts/sysdig/sysdig/templates/secrets.yaml @@ -1,4 +1,5 @@ {{- if not .Values.sysdig.existingAccessKeySecret }} +--- apiVersion: v1 kind: Secret metadata: @@ -7,10 +8,10 @@ metadata: {{ include "sysdig.labels" . 
| indent 4 }}
 type: Opaque
 data:
-  access-key : {{ required "A valid .Values.sysdig.accessKey is required" .Values.sysdig.accessKey | b64enc | quote }}
----
+  access-key: {{ required "A valid .Values.sysdig.accessKey is required" .Values.sysdig.accessKey | b64enc | quote }}
 {{- end }}
 {{- range .Values.extraSecrets }}
+---
 apiVersion: v1
 kind: Secret
 metadata:
@@ -20,5 +21,4 @@ metadata:
 type: Opaque
 data:
 {{ toYaml .data | indent 2 }}
----
 {{- end }}
diff --git a/charts/weka/csi-wekafsplugin/CHANGELOG.md b/charts/weka/csi-wekafsplugin/CHANGELOG.md
index 689c50753..61c3c7e97 100644
--- a/charts/weka/csi-wekafsplugin/CHANGELOG.md
+++ b/charts/weka/csi-wekafsplugin/CHANGELOG.md
@@ -1,19 +1,17 @@
 ## What's Changed
-### New features
-* feat(CSI-159): add weka driver monitoring for readiness probe by @sergeyberezansky in https://github.com/weka/csi-wekafs/pull/58
+
+### Features
+* feat(CSI-166): update CSI spec to 1.9.0 by @sergeyberezansky in https://github.com/weka/csi-wekafs/pull/178
+
+### Bug Fixes
+* fix(CSI-163): missing ca-certificates package in wekafs container image by @sergeyberezansky in https://github.com/weka/csi-wekafs/pull/179
+
 ### Miscellaneous
-* chore(deps): update actions/checkout action to v4 by @renovate in https://github.com/weka/csi-wekafs/pull/152
-* fix(deps): update kubernetes packages to v0.28.1 by @renovate in https://github.com/weka/csi-wekafs/pull/139
-* fix(deps): update module github.com/google/uuid to v1.3.1 by @renovate in https://github.com/weka/csi-wekafs/pull/148
-* fix(deps): update module github.com/rs/zerolog to v1.30.0 by @renovate in https://github.com/weka/csi-wekafs/pull/146
-* fix(deps): update module google.golang.org/grpc to v1.58.0 by @renovate in https://github.com/weka/csi-wekafs/pull/145
-* fix(deps): update module github.com/kubernetes-csi/csi-lib-utils to v0.15.0 by @renovate in https://github.com/weka/csi-wekafs/pull/149
-* fix(deps): update opentelemetry-go monorepo to v1.17.0 by @renovate in https://github.com/weka/csi-wekafs/pull/151
-* fix(deps): update golang.org/x/exp digest to 9212866 by @renovate in https://github.com/weka/csi-wekafs/pull/144
-* chore(deps): update docker/build-push-action action to v5 by @renovate in https://github.com/weka/csi-wekafs/pull/154
-* chore(deps): update docker/login-action action to v3 by @renovate in https://github.com/weka/csi-wekafs/pull/155
-* chore(deps): update docker/setup-buildx-action action to v3 by @renovate in https://github.com/weka/csi-wekafs/pull/156
+* chore(deps): update actions/checkout digest to b4ffde6 by @renovate in https://github.com/weka/csi-wekafs/pull/161
+* chore(deps): update stefanzweifel/git-auto-commit-action action to v5 by @renovate in https://github.com/weka/csi-wekafs/pull/167
+* chore(deps): update helm/chart-testing-action action to v2.6.0 by @renovate in https://github.com/weka/csi-wekafs/pull/181
+* chore(deps): bump dependencies by @sergeyberezansky in https://github.com/weka/csi-wekafs/pull/177
diff --git a/charts/weka/csi-wekafsplugin/Chart.yaml b/charts/weka/csi-wekafsplugin/Chart.yaml
index 252f050ae..f07104ec7 100644
--- a/charts/weka/csi-wekafsplugin/Chart.yaml
+++ b/charts/weka/csi-wekafsplugin/Chart.yaml
@@ -11,7 +11,7 @@ annotations:
   catalog.cattle.io/kube-version: '>=1.18.0'
   catalog.cattle.io/release-name: csi-wekafsplugin
 apiVersion: v2
-appVersion: v2.3.0
+appVersion: v2.3.1
 description: Helm chart for Deployment of WekaIO Container Storage Interface (CSI)
   plugin for WekaFS - the world fastest filesystem
 home: https://github.com/weka/csi-wekafs
@@ -27,6 +27,6 @@ maintainers:
   url: https://weka.io
 name: csi-wekafsplugin
 sources:
-- https://github.com/weka/csi-wekafs/tree/v2.3.0
+- https://github.com/weka/csi-wekafs/tree/v2.3.1
 type: application
-version: 2.3.0
+version: 2.3.1
diff --git a/charts/weka/csi-wekafsplugin/README.md b/charts/weka/csi-wekafsplugin/README.md
index 9583e0f5e..2005778b8 100644
--- a/charts/weka/csi-wekafsplugin/README.md
+++ b/charts/weka/csi-wekafsplugin/README.md
@@ -3,7 +3,7 @@ Helm chart for Deployment of WekaIO Container Storage Interface (CSI) plugin for
 [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
 [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/csi-wekafs)](https://artifacthub.io/packages/search?repo=csi-wekafs)
-![Version: 2.3.0](https://img.shields.io/badge/Version-2.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v2.3.0](https://img.shields.io/badge/AppVersion-v2.3.0-informational?style=flat-square)
+![Version: 2.3.1](https://img.shields.io/badge/Version-2.3.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v2.3.1](https://img.shields.io/badge/AppVersion-v2.3.1-informational?style=flat-square)

 ## Homepage
 https://github.com/weka/csi-wekafs
@@ -56,7 +56,7 @@ Kubernetes: `>=1.18.0`
 |-----|------|---------|-------------|
 | dynamicProvisionPath | string | `"csi-volumes"` | Directory in root of file system where dynamic volumes are provisioned |
 | csiDriverName | string | `"csi.weka.io"` | Name of the driver (and provisioner) |
-| csiDriverVersion | string | `"2.3.0"` | CSI driver version |
+| csiDriverVersion | string | `"2.3.1"` | CSI driver version |
 | images.livenessprobesidecar | string | `"registry.k8s.io/sig-storage/livenessprobe:v2.10.0"` | CSI liveness probe sidecar image URL |
 | images.attachersidecar | string | `"registry.k8s.io/sig-storage/csi-attacher:v4.3.0"` | CSI attacher sidecar image URL |
 | images.provisionersidecar | string | `"registry.k8s.io/sig-storage/csi-provisioner:v3.5.0"` | CSI provisioner sidecar image URL |
@@ -64,7 +64,7 @@ Kubernetes: `>=1.18.0`
 | images.resizersidecar | string | `"registry.k8s.io/sig-storage/csi-resizer:v1.8.0"` | CSI resizer sidecar image URL |
 | images.snapshottersidecar | string | `"registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2"` | CSI snapshotter sidecar image URL |
 | images.csidriver | string | `"quay.io/weka.io/csi-wekafs"` | CSI driver main image URL |
-| images.csidriverTag | string | `"2.3.0"` | CSI driver tag |
+| images.csidriverTag | string | `"2.3.1"` | CSI driver tag |
 | globalPluginTolerations | list | `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","operator":"Exists"}]` | Tolerations for all CSI driver components |
 | controllerPluginTolerations | list | `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","operator":"Exists"}]` | Tolerations for CSI controller component only (by default same as global) |
 | nodePluginTolerations | list | `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","operator":"Exists"}]` | Tolerations for CSI node component only (by default same as global) |
@@ -102,4 +102,4 @@
 | pluginConfig.mutuallyExclusiveMountOptions[0] | string | `"readcache,writecache,coherent,forcedirect"` | |

 ----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.11.2](https://github.com/norwoodj/helm-docs/releases/v1.11.2) +Autogenerated from chart metadata using [helm-docs v1.11.3](https://github.com/norwoodj/helm-docs/releases/v1.11.3) diff --git a/charts/weka/csi-wekafsplugin/values.yaml b/charts/weka/csi-wekafsplugin/values.yaml index 07ddece29..accd73719 100644 --- a/charts/weka/csi-wekafsplugin/values.yaml +++ b/charts/weka/csi-wekafsplugin/values.yaml @@ -5,7 +5,7 @@ dynamicProvisionPath: "csi-volumes" # -- Name of the driver (and provisioner) csiDriverName: "csi.weka.io" # -- CSI driver version -csiDriverVersion: &csiDriverVersion 2.3.0 +csiDriverVersion: &csiDriverVersion 2.3.1 images: # -- CSI liveness probe sidecar image URL livenessprobesidecar: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 diff --git a/index.yaml b/index.yaml index 0d7af3651..e2425bbb3 100644 --- a/index.yaml +++ b/index.yaml @@ -80,6 +80,63 @@ entries: - assets/datawiza/access-broker-0.1.1.tgz version: 0.1.1 airflow: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Airflow + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: airflow + category: WorkFlow + images: | + - name: airflow-exporter + image: docker.io/bitnami/airflow-exporter:0.20220314.0-debian-11-r438 + - name: airflow-scheduler + image: docker.io/bitnami/airflow-scheduler:2.7.2-debian-11-r0 + - name: airflow-worker + image: docker.io/bitnami/airflow-worker:2.7.2-debian-11-r1 + - name: airflow + image: docker.io/bitnami/airflow:2.7.2-debian-11-r1 + - name: git + image: docker.io/bitnami/git:2.42.0-debian-11-r45 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 2.7.2 + created: "2023-11-06T14:43:14.877206718Z" + dependencies: + - condition: redis.enabled + name: redis + repository: file://./charts/redis + version: 18.x.x + - condition: postgresql.enabled + name: postgresql + repository: file://./charts/postgresql + version: 13.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Airflow is a tool to express and execute workflows as directed + acyclic graphs (DAGs). It includes utilities to schedule tasks, monitor task + progress and handle task dependencies. + digest: 0438798e49105c7a5f2c8c90b98e5643c65dcfe8035f39bf55fc88dd67faea93 + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/airflow-1.svg + keywords: + - apache + - airflow + - workflow + - dag + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: airflow + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/airflow + urls: + - assets/bitnami/airflow-16.1.0.tgz + version: 16.1.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Airflow @@ -2110,6 +2167,38 @@ entries: - assets/bitnami/airflow-13.1.7.tgz version: 13.1.7 amd-gpu: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: AMD GPU Device Plugin + catalog.cattle.io/kube-version: '>= 1.18.0-0' + catalog.cattle.io/release-name: amd-gpu + apiVersion: v2 + appVersion: 1.25.2.5 + created: "2023-11-06T14:42:34.609017941Z" + dependencies: + - condition: nfd.enabled + name: node-feature-discovery + repository: file://./charts/node-feature-discovery + version: '>= 0.8.1-0' + description: A Helm chart for deploying Kubernetes AMD GPU device plugin + digest: ca1e60ba792e6ca4e1518b8c1814b4baba1c9bfca35893f897fb60ffc2f455e1 + home: https://github.com/RadeonOpenCompute/k8s-device-plugin + icon: https://raw.githubusercontent.com/RadeonOpenCompute/k8s-device-plugin/master/helm/logo.png + keywords: + - kubernetes + - cluster + - hardware + - gpu + kubeVersion: '>= 1.18.0-0' + maintainers: + - name: Kenny Ho + name: amd-gpu + sources: + - https://github.com/RadeonOpenCompute/k8s-device-plugin + type: application + urls: + - assets/amd/amd-gpu-0.10.0.tgz + version: 0.10.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: AMD GPU Device Plugin @@ -2145,8 +2234,8 @@ entries: argo-cd: - annotations: artifacthub.io/changes: | - - kind: added - description: Add notification cluster role support + - kind: changed + description: Upgrade Argo CD to v2.9.0 artifacthub.io/signKey: | fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 url: https://argoproj.github.io/argo-helm/pgp_keys.asc @@ -2156,8 +2245,8 @@ entries: catalog.cattle.io/kube-version: '>=1.23.0-0' catalog.cattle.io/release-name: argo-cd apiVersion: v2 - appVersion: v2.8.5 - created: "2023-10-31T13:39:42.763394267Z" + appVersion: v2.9.0 + created: "2023-11-06T14:43:12.698967005Z" dependencies: - condition: redis-ha.enabled name: redis-ha @@ -2165,7 +2254,46 @@ entries: version: 4.23.0 description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery tool for Kubernetes. 
- digest: d9d983efbd013780bcc3b09633b724cb3b1457f578de4953c78d9217fbe89d4c + digest: 98e3940e05a80f96977e8dbe9fbddca8bc081579d4f0fa16e61e2cdda901c4bd + home: https://github.com/argoproj/argo-helm + icon: https://argo-cd.readthedocs.io/en/stable/assets/logo.png + keywords: + - argoproj + - argocd + - gitops + kubeVersion: '>=1.23.0-0' + maintainers: + - name: argoproj + url: https://argoproj.github.io/ + name: argo-cd + sources: + - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd + - https://github.com/argoproj/argo-cd + urls: + - assets/argo/argo-cd-5.51.0.tgz + version: 5.51.0 + - annotations: + artifacthub.io/changes: | + - kind: added + description: Add notification cluster role support + artifacthub.io/signKey: | + fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 + url: https://argoproj.github.io/argo-helm/pgp_keys.asc + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Argo CD + catalog.cattle.io/kube-version: '>=1.23.0-0' + catalog.cattle.io/release-name: argo-cd + apiVersion: v2 + appVersion: v2.8.5 + created: "2023-11-06T14:42:36.163317906Z" + dependencies: + - condition: redis-ha.enabled + name: redis-ha + repository: file://./charts/redis-ha + version: 4.23.0 + description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery + tool for Kubernetes. + digest: 8c6bee7bd91461a995837b4fa0292d20f3de734a72f1df6cf106e1ea08757ec5 home: https://github.com/argoproj/argo-helm icon: https://argo-cd.readthedocs.io/en/stable/assets/logo.png keywords: @@ -10717,6 +10845,48 @@ entries: - assets/asserts/asserts-1.6.0.tgz version: 1.6.0 cassandra: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Cassandra + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: cassandra + category: Database + images: | + - name: cassandra-exporter + image: docker.io/bitnami/cassandra-exporter:2.3.8-debian-11-r429 + - name: cassandra + image: docker.io/bitnami/cassandra:4.1.3-debian-11-r71 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 4.1.3 + created: "2023-11-06T14:43:15.012260784Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Cassandra is an open source distributed database management + system designed to handle large amounts of data across many servers, providing + high availability with no single point of failure. + digest: fae288c9ac3410a2cda13b8059f17fade5561ea5c17250578a2b43748e83ca1c + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/cassandra-4.svg + keywords: + - cassandra + - database + - nosql + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: cassandra + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/cassandra + urls: + - assets/bitnami/cassandra-10.6.0.tgz + version: 10.6.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Cassandra @@ -14471,6 +14641,47 @@ entries: - assets/confluent/confluent-for-kubernetes-0.174.2101.tgz version: 0.174.2101 consul: + - annotations: + artifacthub.io/images: | + - name: consul + image: hashicorp/consul:1.16.3 + - name: consul-k8s-control-plane + image: hashicorp/consul-k8s-control-plane:1.2.3 + - name: consul-dataplane + image: hashicorp/consul-dataplane:1.2.3 + - name: envoy + image: envoyproxy/envoy:v1.25.11 + artifacthub.io/license: MPL-2.0 + artifacthub.io/links: | + - name: Documentation + url: https://www.consul.io/docs/k8s + - name: hashicorp/consul + url: https://github.com/hashicorp/consul + - name: hashicorp/consul-k8s + url: https://github.com/hashicorp/consul-k8s + artifacthub.io/prerelease: "false" + artifacthub.io/signKey: | + fingerprint: C874011F0AB405110D02105534365D9472D7468F + url: https://keybase.io/hashicorp/pgp_keys.asc + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Hashicorp Consul + catalog.cattle.io/kube-version: '>=1.22.0-0' + catalog.cattle.io/release-name: consul + apiVersion: v2 + appVersion: 1.16.3 + created: "2023-11-06T14:43:22.136587319Z" + description: Official HashiCorp Consul Chart + digest: 92722291861103fe39699870be402cedcc8a938926c088cdd4868980e4313ac3 + home: https://www.consul.io + icon: https://raw.githubusercontent.com/hashicorp/consul-k8s/main/assets/icon.png + kubeVersion: '>=1.22.0-0' + name: consul + sources: + - https://github.com/hashicorp/consul + - https://github.com/hashicorp/consul-k8s + urls: + - assets/hashicorp/consul-1.2.3.tgz + version: 1.2.3 - annotations: artifacthub.io/images: | - name: consul @@ -15060,8 +15271,8 @@ entries: catalog.cattle.io/featured: "1" catalog.cattle.io/release-name: cost-analyzer apiVersion: v2 - appVersion: 1.106.4 - created: "2023-10-26T13:20:38.003506945Z" + appVersion: 1.107.0 + created: "2023-11-06T14:43:49.641713882Z" dependencies: - condition: global.grafana.enabled name: grafana @@ -15077,7 +15288,38 @@ entries: version: ~0.29.0 description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor cloud costs. - digest: 342e75e87b3f210059cc71976b2262b07aff6f8d8d8175b3807d88ca649ea7b1 + digest: 60ef220bb098b34a131a47274a4c46168d9fcd1297b75986233188b02488a1c3 + icon: https://partner-charts.rancher.io/assets/logos/kubecost.png + name: cost-analyzer + urls: + - assets/kubecost/cost-analyzer-1.107.0.tgz + version: 1.107.0 + - annotations: + artifacthub.io/links: | + - name: Homepage + url: https://www.kubecost.com + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Kubecost + catalog.cattle.io/release-name: cost-analyzer + apiVersion: v2 + appVersion: 1.106.4 + created: "2023-11-06T14:43:25.836188232Z" + dependencies: + - condition: global.grafana.enabled + name: grafana + repository: file://./charts/grafana + version: ~1.17.2 + - condition: global.prometheus.enabled + name: prometheus + repository: file://./charts/prometheus + version: ~11.0.2 + - condition: global.thanos.enabled + name: thanos + repository: file://./charts/thanos + version: ~0.29.0 + description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor + cloud costs. 
+ digest: effba5cada42990036bb0ff22b3bac27df5d86861543b4fe46ab74a1b1854814 icon: https://partner-charts.rancher.io/assets/logos/kubecost.png name: cost-analyzer urls: @@ -16910,6 +17152,42 @@ entries: - assets/dell/csi-vxflexos-2.1.0.tgz version: 2.1.0 csi-wekafsplugin: + - annotations: + artifacthub.io/category: storage + artifacthub.io/containsSecurityUpdates: "true" + artifacthub.io/license: Apache-2.0 + artifacthub.io/prerelease: "false" + artifacthub.io/signKey: | + fingerprint: BA9F2D31BE9193E01FA17450BCE0A5CF67AC0C59 + url: https://weka.github.io/csi-wekafs/csi-public.gpg + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: WekaFS CSI Driver + catalog.cattle.io/kube-version: '>=1.18.0' + catalog.cattle.io/release-name: csi-wekafsplugin + apiVersion: v2 + appVersion: v2.3.1 + created: "2023-11-06T14:43:53.464722523Z" + description: Helm chart for Deployment of WekaIO Container Storage Interface (CSI) + plugin for WekaFS - the world fastest filesystem + digest: 8822fba759ef2768470da8b01a2f54941609054302bb20840af0c781a960863a + home: https://github.com/weka/csi-wekafs + icon: https://weka.github.io/csi-wekafs/logo.png + keywords: + - storage + - filesystem + - HPC + kubeVersion: '>=1.18.0' + maintainers: + - email: csi@weka.io + name: WekaIO, Inc. + url: https://weka.io + name: csi-wekafsplugin + sources: + - https://github.com/weka/csi-wekafs/tree/v2.3.1 + type: application + urls: + - assets/weka/csi-wekafsplugin-2.3.1.tgz + version: 2.3.1 - annotations: artifacthub.io/category: storage artifacthub.io/containsSecurityUpdates: "true" @@ -17248,6 +17526,43 @@ entries: - assets/weka/csi-wekafsplugin-0.6.400.tgz version: 0.6.400 datadog: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Datadog + catalog.cattle.io/kube-version: '>=1.10-0' + catalog.cattle.io/release-name: datadog + apiVersion: v1 + appVersion: "7" + created: "2023-11-06T14:43:20.838829883Z" + dependencies: + - condition: clusterAgent.metricsProvider.useDatadogMetrics + name: datadog-crds + repository: https://helm.datadoghq.com + tags: + - install-crds + version: 1.0.1 + - condition: datadog.kubeStateMetricsEnabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 2.13.2 + description: Datadog Agent + digest: ac9848f79730ea67fbbd563e608dacc85bfa5f3efb6646e848e7cef7efe41324 + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-3.43.1.tgz + version: 3.43.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Datadog @@ -21893,6 +22208,38 @@ entries: - assets/f5/f5-bigip-ctlr-0.0.1901.tgz version: 0.0.1901 falcon-sensor: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: CrowdStrike Falcon Platform + catalog.cattle.io/kube-version: '>1.22.0-0' + catalog.cattle.io/release-name: falcon-sensor + apiVersion: v2 + appVersion: 1.23.1 + created: "2023-11-06T14:43:19.958015157Z" + description: A Helm chart to deploy CrowdStrike Falcon sensors into Kubernetes + clusters. 
+ digest: e3557b6850d0f63f0d8d351ce6612552550931bc7b434538679a8d430ffc9f4e + home: https://crowdstrike.com + icon: https://raw.githubusercontent.com/CrowdStrike/falcon-helm/main/images/crowdstrike-logo.svg + keywords: + - CrowdStrike + - Falcon + - EDR + - kubernetes + - security + - monitoring + - alerting + kubeVersion: '>1.22.0-0' + maintainers: + - email: integrations@crowdstrike.com + name: CrowdStrike Solutions Architecture + name: falcon-sensor + sources: + - https://github.com/CrowdStrike/falcon-helm + type: application + urls: + - assets/crowdstrike/falcon-sensor-1.23.1.tgz + version: 1.23.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: CrowdStrike Falcon Platform @@ -24507,6 +24854,35 @@ entries: - assets/gopaddle/gopaddle-4.2.5.tgz version: 4.2.5 haproxy: + - annotations: + artifacthub.io/changes: | + - Use Ingress Controller 1.10.9 version for base image + - Set allowPrivilegeEscalation to false by default + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: HAProxy Kubernetes Ingress Controller + catalog.cattle.io/kube-version: '>=1.22.0-0' + catalog.cattle.io/release-name: haproxy + apiVersion: v2 + appVersion: 1.10.9 + created: "2023-11-06T14:43:21.882044515Z" + description: A Helm chart for HAProxy Kubernetes Ingress Controller + digest: 9a561ff0a7a8aab2f4f706d6cc2842ea6ad694e0477422a6690281c046ebfd95 + home: https://github.com/haproxytech/helm-charts/tree/main/kubernetes-ingress + icon: https://raw.githubusercontent.com/haproxytech/helm-charts/main/kubernetes-ingress/chart-icon.png + keywords: + - ingress + - haproxy + kubeVersion: '>=1.22.0-0' + maintainers: + - email: dkorunic@haproxy.com + name: Dinko Korunic + name: haproxy + sources: + - https://github.com/haproxytech/kubernetes-ingress + type: application + urls: + - assets/haproxy/haproxy-1.34.0.tgz + version: 1.34.0 - annotations: artifacthub.io/changes: | - Use Ingress Controller 1.10.8 version for base image @@ -25328,6 +25704,37 @@ entries: - assets/haproxy/haproxy-1.4.300.tgz version: 1.4.300 harbor: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Harbor + catalog.cattle.io/kube-version: '>=1.20-0' + catalog.cattle.io/release-name: harbor + apiVersion: v1 + appVersion: 2.9.1 + created: "2023-11-06T14:43:21.949940903Z" + description: An open source trusted cloud native registry that stores, signs, + and scans content + digest: c551836ec1d6c09facb76facd084b98b9980e194ecf67e9c361c0ae5c50cf63b + home: https://goharbor.io + icon: https://raw.githubusercontent.com/goharbor/website/main/static/img/logos/harbor-icon-color.png + keywords: + - docker + - registry + - harbor + maintainers: + - email: yinw@vmware.com + name: Wenkai Yin + - email: hweiwei@vmware.com + name: Weiwei He + - email: yshengwen@vmware.com + name: Shengwen Yu + name: harbor + sources: + - https://github.com/goharbor/harbor + - https://github.com/goharbor/harbor-helm + urls: + - assets/harbor/harbor-1.13.1.tgz + version: 1.13.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Harbor @@ -30093,6 +30500,34 @@ entries: - assets/trilio/k8s-triliovault-operator-v2.0.200.tgz version: v2.0.200 k10: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: K10 + catalog.cattle.io/kube-version: '>= 1.17.0-0' + catalog.cattle.io/release-name: k10 + apiVersion: v2 + appVersion: 6.0.12 + created: "2023-11-06T14:43:24.567331565Z" + dependencies: + - condition: grafana.enabled + name: grafana + 
repository: file://./charts/grafana + version: 6.60.6 + - condition: prometheus.server.enabled + name: prometheus + repository: file://./charts/prometheus + version: 23.3.0 + description: Kasten’s K10 Data Management Platform + digest: 82e79e23da1f497032c1af93bba27de0767b44bc2c900b16a9d1f7e9b393fa4c + home: https://kasten.io/ + icon: https://docs.kasten.io/_static/logo-kasten-k10-blue-white.png + maintainers: + - email: contact@kasten.io + name: kastenIO + name: k10 + urls: + - assets/kasten/k10-6.0.1201.tgz + version: 6.0.1201 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: K10 @@ -31006,6 +31441,58 @@ entries: - assets/kasten/k10-4.5.900.tgz version: 4.5.900 kafka: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Kafka + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: kafka + category: Infrastructure + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.19.0-debian-11-r95 + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.7.0-debian-11-r132 + - name: kafka + image: docker.io/bitnami/kafka:3.6.0-debian-11-r0 + - name: kubectl + image: docker.io/bitnami/kubectl:1.28.2-debian-11-r16 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.6.0 + created: "2023-11-06T14:43:15.86863208Z" + dependencies: + - condition: zookeeper.enabled + name: zookeeper + repository: file://./charts/zookeeper + version: 12.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Kafka is a distributed streaming platform designed to build + real-time pipelines and can be used as a message broker or as a replacement + for a log aggregation solution for big data applications. + digest: 8342e04184a5617f3d0603de49730d3fc12fc31e07994c27be5c3fc09be6019b + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/kafka.svg + keywords: + - kafka + - zookeeper + - streaming + - producer + - consumer + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: kafka + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/kafka + urls: + - assets/bitnami/kafka-26.2.1.tgz + version: 26.2.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Kafka @@ -34166,6 +34653,31 @@ entries: - assets/elastic/kibana-7.17.3.tgz version: 7.17.3 kong: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Kong Gateway + catalog.cattle.io/release-name: kong + apiVersion: v2 + appVersion: "3.4" + created: "2023-11-06T14:43:25.265044606Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: file://./charts/postgresql + version: 11.9.13 + description: The Cloud-Native Ingress and API-management + digest: d1651867a76391201c155d62324e3138b8e9c9ebad6c4f936ef9995d21b87290 + home: https://konghq.com/ + icon: https://s3.amazonaws.com/downloads.kong/universe/assets/icon-kong-inc-large.png + maintainers: + - email: team-k8s@konghq.com + name: team-k8s-bot + name: kong + sources: + - https://github.com/Kong/charts/tree/main/charts/kong + urls: + - assets/kong/kong-2.31.0.tgz + version: 2.31.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Kong Gateway @@ -39255,6 +39767,50 @@ entries: - assets/minio/minio-operator-4.4.1700.tgz version: 4.4.1700 mysql: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: MySQL + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: mysql + category: Database + images: | + - name: mysql + image: docker.io/bitnami/mysql:8.0.35-debian-11-r0 + - name: mysqld-exporter + image: docker.io/bitnami/mysqld-exporter:0.15.0-debian-11-r70 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 8.0.35 + created: "2023-11-06T14:43:16.199276936Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: MySQL is a fast, reliable, scalable, and easy to use open source + relational database system. Designed to handle mission-critical, heavy-load + production applications. + digest: 82b604ce1a2145209853ebdd64a6b8d0333f377d3cb63dcc4429ad894765a9cc + home: https://bitnami.com + icon: https://www.mysql.com/common/logos/logo-mysql-170x115.png + keywords: + - mysql + - database + - sql + - cluster + - high availability + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: mysql + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/mysql + urls: + - assets/bitnami/mysql-9.14.2.tgz + version: 9.14.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: MySQL @@ -40587,6 +41143,31 @@ entries: - assets/bitnami/mysql-9.4.1.tgz version: 9.4.1 nats: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: NATS Server + catalog.cattle.io/kube-version: '>=1.16-0' + catalog.cattle.io/release-name: nats + apiVersion: v2 + appVersion: 2.10.4 + created: "2023-11-06T14:43:50.046249951Z" + description: A Helm chart for the NATS.io High Speed Cloud Native Distributed + Communications Technology. 
+ digest: 680ed59ac92528ae9cc9103ed05054868fbf95de0a63fd8c23cdaa2cd0a2eac7 + home: http://github.com/nats-io/k8s + icon: https://nats.io/img/nats-icon-color.png + keywords: + - nats + - messaging + - cncf + maintainers: + - email: info@nats.io + name: The NATS Authors + url: https://github.com/nats-io + name: nats + urls: + - assets/nats/nats-1.1.4.tgz + version: 1.1.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: NATS Server @@ -41316,6 +41897,32 @@ entries: - assets/nats/nats-0.10.0.tgz version: 0.10.0 nginx-ingress: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: NGINX Ingress Controller + catalog.cattle.io/kube-version: '>= 1.22.0-0' + catalog.cattle.io/release-name: nginx-ingress + apiVersion: v2 + appVersion: 3.3.2 + created: "2023-11-06T14:43:21.409339623Z" + description: NGINX Ingress Controller + digest: b9105b9c60048e0dca19276da353416ee26e7b94e0c15d02d99dcd149a181a89 + home: https://github.com/nginxinc/kubernetes-ingress + icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.2/deployments/helm-chart/chart-icon.png + keywords: + - ingress + - nginx + kubeVersion: '>= 1.22.0-0' + maintainers: + - email: kubernetes@nginx.com + name: nginxinc + name: nginx-ingress + sources: + - https://github.com/nginxinc/kubernetes-ingress/tree/v3.3.2/deployments/helm-chart + type: application + urls: + - assets/f5/nginx-ingress-1.0.2.tgz + version: 1.0.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: NGINX Ingress Controller @@ -46442,6 +47049,51 @@ entries: - assets/portworx/portworx-essentials-2.9.100.tgz version: 2.9.100 postgresql: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: PostgreSQL + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: postgresql + category: Database + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + - name: postgres-exporter + image: docker.io/bitnami/postgres-exporter:0.15.0-debian-11-r0 + - name: postgresql + image: docker.io/bitnami/postgresql:16.0.0-debian-11-r14 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 16.0.0 + created: "2023-11-06T14:43:16.741217816Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: PostgreSQL (Postgres) is an open source object-relational database + known for reliability and data integrity. ACID-compliant, it supports foreign + keys, joins, views, triggers and stored procedures. + digest: 007bd376597c861a7e22a2f875efc65b7e8418c92021797aa09fe2cd5f478043 + home: https://bitnami.com + icon: https://wiki.postgresql.org/images/a/a4/PostgreSQL_logo.3colors.svg + keywords: + - postgresql + - postgres + - database + - sql + - replication + - cluster + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: postgresql + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/postgresql + urls: + - assets/bitnami/postgresql-13.2.1.tgz + version: 13.2.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: PostgreSQL @@ -49621,6 +50273,50 @@ entries: - assets/quobyte/quobyte-cluster-0.1.5.tgz version: 0.1.5 redis: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redis + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: redis + category: Database + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + - name: redis-exporter + image: docker.io/bitnami/redis-exporter:1.55.0-debian-11-r0 + - name: redis-sentinel + image: docker.io/bitnami/redis-sentinel:7.2.3-debian-11-r0 + - name: redis + image: docker.io/bitnami/redis:7.2.3-debian-11-r0 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 7.2.3 + created: "2023-11-06T14:43:17.28573382Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Redis(R) is an open source, advanced key-value store. It is often + referred to as a data structure server since keys can contain strings, hashes, + lists, sets and sorted sets. + digest: d120c5743e0cef724977a557e631cbb5a550f9d5d306f8a736157c69223dfffd + home: https://bitnami.com + icon: https://redis.com/wp-content/uploads/2021/08/redis-logo.png + keywords: + - redis + - keyvalue + - database + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: redis + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/redis + urls: + - assets/bitnami/redis-18.2.1.tgz + version: 18.2.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Redis @@ -51809,6 +52505,50 @@ entries: - assets/bitnami/redis-17.3.7.tgz version: 17.3.7 redpanda: + - annotations: + artifacthub.io/images: | + - name: redpanda + image: docker.redpanda.com/redpandadata/redpanda:v23.2.14 + - name: busybox + image: busybox:latest + - name: mintel/docker-alpine-bash-curl-jq + image: mintel/docker-alpine-bash-curl-jq:latest + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Documentation + url: https://docs.redpanda.com + - name: "Helm (>= 3.8.0)" + url: https://helm.sh/docs/intro/install/ + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redpanda + catalog.cattle.io/kube-version: '>=1.21-0' + catalog.cattle.io/release-name: redpanda + apiVersion: v2 + appVersion: v23.2.14 + created: "2023-11-06T14:43:52.105017898Z" + dependencies: + - condition: console.enabled + name: console + repository: file://./charts/console + version: '>=0.5 <1.0' + - condition: connectors.enabled + name: connectors + repository: file://./charts/connectors + version: '>=0.1.2 <1.0' + description: Redpanda is the real-time engine for modern apps. 
+ digest: 9351935960bfe8b654daf13fb02911a531b43ffb924c14d593e0bc4e215e34b4 + icon: https://images.ctfassets.net/paqvtpyf8rwu/3cYHw5UzhXCbKuR24GDFGO/73fb682e6157d11c10d5b2b5da1d5af0/skate-stand-panda.svg + kubeVersion: '>=1.21-0' + maintainers: + - name: redpanda-data + url: https://github.com/orgs/redpanda-data/people + name: redpanda + sources: + - https://github.com/redpanda-data/helm-charts + type: application + urls: + - assets/redpanda/redpanda-5.6.38.tgz + version: 5.6.38 - annotations: artifacthub.io/images: | - name: redpanda @@ -56401,6 +57141,43 @@ entries: - assets/shipa/shipa-1.4.0.tgz version: 1.4.0 spark: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Spark + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: spark + category: Infrastructure + images: | + - name: spark + image: docker.io/bitnami/spark:3.5.0-debian-11-r12 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.5.0 + created: "2023-11-06T14:43:17.458888219Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Spark is a high-performance engine for large-scale computing + tasks, such as data processing, machine learning and real-time data streaming. + It includes APIs for Java, Python, Scala and R. + digest: 6b730296b15a29837b14d82702f83e41e4dfb0a380bfe660d6e97390a9de0b4a + home: https://bitnami.com + icon: https://www.apache.org/logos/res/spark/default.png + keywords: + - apache + - spark + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: spark + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/spark + urls: + - assets/bitnami/spark-8.1.0.tgz + version: 8.1.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Spark @@ -57629,6 +58406,37 @@ entries: - assets/bitnami/spark-6.3.8.tgz version: 6.3.8 speedscale-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Speedscale Operator + catalog.cattle.io/kube-version: '>= 1.17.0-0' + catalog.cattle.io/release-name: speedscale-operator + apiVersion: v1 + appVersion: 1.4.5 + created: "2023-11-06T14:43:52.297747804Z" + description: Stress test your APIs with real world scenarios. Collect and replay + traffic without scripting. 
+ digest: 339ab097e24614e5b52a3338d37decd54503e53cef5380e406a2c5227fbafc7c + home: https://speedscale.com + icon: https://raw.githubusercontent.com/speedscale/assets/main/logo/gold_logo_only.png + keywords: + - speedscale + - test + - testing + - regression + - reliability + - load + - replay + - network + - traffic + kubeVersion: '>= 1.17.0-0' + maintainers: + - email: support@speedscale.com + name: Speedscale Support + name: speedscale-operator + urls: + - assets/speedscale/speedscale-operator-1.4.0.tgz + version: 1.4.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Speedscale Operator @@ -59877,6 +60685,32 @@ entries: - assets/sumologic/sumologic-2.17.0.tgz version: 2.17.0 sysdig: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Sysdig + catalog.cattle.io/release-name: sysdig + apiVersion: v1 + appVersion: 12.17.1 + created: "2023-11-06T14:43:52.776217389Z" + deprecated: true + description: Sysdig Monitor and Secure agent + digest: b798202b535a6e25c341d8c26bf9e850794866cc9a26855a5e370f24bb86ec47 + home: https://www.sysdig.com/ + icon: https://avatars.githubusercontent.com/u/5068817?s=200&v=4 + keywords: + - monitoring + - security + - alerting + - metric + - troubleshooting + - run-time + name: sysdig + sources: + - https://app.sysdigcloud.com/#/settings/user + - https://github.com/draios/sysdig + urls: + - assets/sysdig/sysdig-1.16.20.tgz + version: 1.16.20 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Sysdig @@ -61462,6 +62296,51 @@ entries: - assets/intel/tcs-issuer-0.1.0.tgz version: 0.1.0 tomcat: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Tomcat + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: tomcat + category: ApplicationServer + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.19.0-debian-11-r95 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + - name: tomcat + image: docker.io/bitnami/tomcat:10.1.15-debian-11-r0 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 10.1.15 + created: "2023-11-06T14:43:17.50059968Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Tomcat is an open-source web server designed to host and run + Java-based web applications. It is a lightweight server with a good performance + for applications running in production environments. + digest: 58722bd1db7504a241a5f948f2ab8f0ddc2f2701361a1d26dab755ceb29a0f50 + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/tomcat.svg + keywords: + - tomcat + - java + - http + - web + - application server + - jsp + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: tomcat + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/tomcat + urls: + - assets/bitnami/tomcat-10.11.0.tgz + version: 10.11.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Tomcat @@ -65108,6 +65987,60 @@ entries: - assets/hashicorp/vault-0.22.0.tgz version: 0.22.0 wordpress: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: WordPress + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: wordpress + category: CMS + images: | + - name: apache-exporter + image: docker.io/bitnami/apache-exporter:1.0.3-debian-11-r0 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + - name: wordpress + image: docker.io/bitnami/wordpress:6.3.2-debian-11-r8 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 6.3.2 + created: "2023-11-06T14:43:19.182813745Z" + dependencies: + - condition: memcached.enabled + name: memcached + repository: file://./charts/memcached + version: 6.x.x + - condition: mariadb.enabled + name: mariadb + repository: file://./charts/mariadb + version: 14.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: WordPress is the world's most popular blogging and content management + platform. Powerful yet simple, everyone from students to global corporations + use it to build beautiful, functional websites. + digest: 934c2c1c48b90a46da8977f3b835b1d4c5bcaf5821f6eae03123dd3b5508fd91 + home: https://bitnami.com + icon: https://s.w.org/style/images/about/WordPress-logotype-simplified.png + keywords: + - application + - blog + - cms + - http + - php + - web + - wordpress + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: wordpress + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/wordpress + urls: + - assets/bitnami/wordpress-18.1.3.tgz + version: 18.1.3 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: WordPress @@ -70804,6 +71737,43 @@ entries: - assets/netfoundry/ziti-host-1.5.1.tgz version: 1.5.1 zookeeper: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Zookeeper + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: zookeeper + category: Infrastructure + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + - name: zookeeper + image: docker.io/bitnami/zookeeper:3.9.1-debian-11-r1 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.9.1 + created: "2023-11-06T14:43:19.364827421Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. + digest: 2839c96d281a581e17c4ba1c85b5a14546ac8114867d250e3278ea44bfd61016 + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/zookeeper.svg + keywords: + - zookeeper + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: zookeeper + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/zookeeper + urls: + - assets/bitnami/zookeeper-12.3.0.tgz + version: 12.3.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Zookeeper