Charts CI

```
Updated:
  argo/argo-cd:
    - 5.36.0
  bitnami/kafka:
    - 22.1.5
  bitnami/mariadb:
    - 12.2.5
  bitnami/wordpress:
    - 16.1.13
  btp/chronicle:
    - 0.1.15
  crate/crate-operator:
    - 2.29.0
  digitalis/vals-operator:
    - 0.7.4
  intel/intel-device-plugins-operator:
    - 0.26.1
  intel/intel-device-plugins-qat:
    - 0.26.1
  intel/intel-device-plugins-sgx:
    - 0.26.1
  kubecost/cost-analyzer:
    - 1.104.0
  new-relic/nri-bundle:
    - 5.0.18
  speedscale/speedscale-operator:
    - 1.3.10
```
pull/782/head
github-actions[bot] 2023-06-08 14:17:55 +00:00
parent 585c47c337
commit c82a911aad
118 changed files with 1642 additions and 364 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,9 +1,7 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: Upgrade Argo CD to v2.7.4
- kind: added
description: Update knownHosts
description: Add .Values.global.env for all deployed containers
artifacthub.io/signKey: |
fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252
url: https://argoproj.github.io/argo-helm/pgp_keys.asc
@ -34,4 +32,4 @@ name: argo-cd
sources:
- https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
- https://github.com/argoproj/argo-cd
version: 5.35.1
version: 5.36.0

View File

@ -407,6 +407,7 @@ NAME: my-release
| global.affinity.podAntiAffinity | string | `"soft"` | Default pod anti-affinity rules. Either: `none`, `soft` or `hard` |
| global.deploymentAnnotations | object | `{}` | Annotations for the all deployed Deployments |
| global.deploymentStrategy | object | `{}` | Deployment strategy for the all deployed Deployments |
| global.env | list | `[]` | Environment variables to pass to all deployed Deployments |
| global.hostAliases | list | `[]` | Mapping between IP and hostnames that will be injected as entries in the pod's hosts files |
| global.image.imagePullPolicy | string | `"IfNotPresent"` | If defined, a imagePullPolicy applied to all Argo CD deployments |
| global.image.repository | string | `"quay.io/argoproj/argocd"` | If defined, a repository applied to all Argo CD deployments |

View File

@ -99,7 +99,7 @@ spec:
imagePullPolicy: {{ default .Values.global.image.imagePullPolicy .Values.controller.image.imagePullPolicy }}
name: {{ .Values.controller.name }}
env:
{{- with .Values.controller.env }}
{{- with (concat .Values.global.env .Values.controller.env) }}
{{- toYaml . | nindent 10 }}
{{- end }}
- name: ARGOCD_CONTROLLER_REPLICAS

View File

@ -79,7 +79,7 @@ spec:
{{- toYaml . | nindent 12 }}
{{- end }}
env:
{{- with .Values.applicationSet.extraEnv }}
{{- with (concat .Values.global.env .Values.applicationSet.extraEnv) }}
{{- toYaml . | nindent 12 }}
{{- end }}
- name: NAMESPACE

View File

@ -66,7 +66,7 @@ spec:
{{- range .Values.notifications.extraArgs }}
- {{ . | squote }}
{{- end }}
{{- with .Values.notifications.extraEnv }}
{{- with (concat .Values.global.env .Values.notifications.extraEnv) }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}

View File

@ -77,7 +77,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
env:
{{- with .Values.repoServer.env }}
{{- with (concat .Values.global.env .Values.repoServer.env) }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- if .Values.openshift.enabled }}

View File

@ -74,7 +74,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
env:
{{- with .Values.server.env }}
{{- with (concat .Values.global.env .Values.server.env) }}
{{- toYaml . | nindent 10 }}
{{- end }}
- name: ARGOCD_SERVER_INSECURE

View File

@ -70,7 +70,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
env:
{{- with .Values.dex.env }}
{{- with (concat .Values.global.env .Values.dex.env) }}
{{- toYaml . | nindent 10 }}
{{- end }}
- name: ARGOCD_DEX_SERVER_DISABLE_TLS

View File

@ -61,7 +61,7 @@ spec:
{{- with .Values.redis.extraArgs }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.redis.env }}
{{- with (concat .Values.global.env .Values.redis.env) }}
env:
{{- toYaml . | nindent 8 }}
{{- end }}
@ -92,7 +92,7 @@ spec:
value: {{ printf "redis://localhost:%v" .Values.redis.containerPorts.redis }}
- name: REDIS_EXPORTER_WEB_LISTEN_ADDRESS
value: {{ printf "0.0.0.0:%v" .Values.redis.containerPorts.metrics }}
{{- with .Values.redis.exporter.env }}
{{- with (concat .Values.global.env .Values.redis.exporter.env) }}
{{- toYaml . | nindent 8 }}
{{- end }}
ports:

View File

@ -144,6 +144,9 @@ global:
# maxSurge: 25%
# maxUnavailable: 25%
# -- Environment variables to pass to all deployed Deployments
env: []
## Argo Configs
configs:
# General Argo CD configuration

View File

@ -1,9 +1,9 @@
dependencies:
- name: zookeeper
repository: oci://registry-1.docker.io/bitnamicharts
version: 11.4.1
version: 11.4.2
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.4.0
digest: sha256:c28eb28d9c6cfe9cc712ea78f7c46c635f11ae787c7cec083ef79162ed6e048a
generated: "2023-05-21T14:41:39.743785146Z"
digest: sha256:d0d3db738ca58fe404cf471499d6cc66827a3480835f4cab0de5053c9684950e
generated: "2023-06-07T04:12:40.544851481Z"

View File

@ -6,7 +6,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 3.4.0
appVersion: 3.4.1
dependencies:
- condition: zookeeper.enabled
name: zookeeper
@ -34,4 +34,4 @@ maintainers:
name: kafka
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/kafka
version: 22.1.4
version: 22.1.5

View File

@ -80,7 +80,7 @@ The command removes all the Kubernetes components associated with the chart and
| ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- |
| `image.registry` | Kafka image registry | `docker.io` |
| `image.repository` | Kafka image repository | `bitnami/kafka` |
| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.4.0-debian-11-r33` |
| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.4.1-debian-11-r0` |
| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@ -254,7 +254,7 @@ The command removes all the Kubernetes components associated with the chart and
| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` |
| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` |
| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` |
| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.10-debian-11-r0` |
| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.10-debian-11-r6` |
| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` |
| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` |
@ -310,7 +310,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r118` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r123` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
@ -335,7 +335,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` |
| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` |
| `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` |
| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.6.0-debian-11-r89` |
| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.7.0-debian-11-r4` |
| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` |
| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@ -383,7 +383,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` |
| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` |
| `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` |
| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.18.0-debian-11-r21` |
| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.18.0-debian-11-r27` |
| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` |
| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |

View File

@ -1,6 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.2.5
digest: sha256:318f438acfeaced11d9060877d615caf1985417d2865810defaa886d3496f8d3
generated: "2023-05-08T19:52:25.626211407Z"
version: 2.4.0
digest: sha256:8c1a5dc923412d11d4d841420494b499cb707305c8b9f87f45ea1a8bf3172cb3
generated: "2023-05-21T17:05:21.743633346Z"

View File

@ -21,4 +21,4 @@ maintainers:
name: zookeeper
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/zookeeper
version: 11.4.1
version: 11.4.2

View File

@ -8,8 +8,6 @@ Apache ZooKeeper provides a reliable, centralized register of configuration data
Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
Looking to use Apache ZooKeeper in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
## TL;DR
```console
@ -82,7 +80,7 @@ The command removes all the Kubernetes components associated with the chart and
| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| `image.registry` | ZooKeeper image registry | `docker.io` |
| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` |
| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.8.1-debian-11-r31` |
| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.8.1-debian-11-r36` |
| `image.digest` | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@ -248,7 +246,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r114` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r118` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |

View File

@ -2,10 +2,10 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.2.5
appVersion: 2.4.0
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://github.com/bitnami/charts/tree/main/bitnami/common
home: https://bitnami.com
icon: https://bitnami.com/downloads/logos/bitnami-mark.png
keywords:
- common
@ -14,11 +14,10 @@ keywords:
- function
- bitnami
maintainers:
- name: Bitnami
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: common
sources:
- https://github.com/bitnami/charts
- https://www.bitnami.com/
type: library
version: 2.2.5
version: 2.4.0

View File

@ -2,6 +2,8 @@
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
## TL;DR
```yaml

View File

@ -48,6 +48,17 @@ Return the appropriate apiVersion for cronjob.
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for daemonset.
*/}}
{{- define "common.capabilities.daemonset.apiVersion" -}}
{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for deployment.
*/}}
@ -141,6 +152,21 @@ Return the appropriate apiVersion for Horizontal Pod Autoscaler.
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for Vertical Pod Autoscaler.
*/}}
{{- define "common.capabilities.vpa.apiVersion" -}}
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
{{- if .beta2 -}}
{{- print "autoscaling/v2beta2" -}}
{{- else -}}
{{- print "autoscaling/v2beta1" -}}
{{- end -}}
{{- else -}}
{{- print "autoscaling/v2" -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if the used Helm version is 3.3+.
A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.

View File

@ -45,7 +45,7 @@ Return the proper Docker Image Registry Secret Names (deprecated: use common.ima
{{- if (not (empty $pullSecrets)) }}
imagePullSecrets:
{{- range $pullSecrets }}
{{- range $pullSecrets | uniq }}
- name: {{ . }}
{{- end }}
{{- end }}
@ -73,7 +73,7 @@ Return the proper Docker Image Registry Secret Names evaluating values as templa
{{- if (not (empty $pullSecrets)) }}
imagePullSecrets:
{{- range $pullSecrets }}
{{- range $pullSecrets | uniq }}
- name: {{ . }}
{{- end }}
{{- end }}

View File

@ -76,7 +76,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/zookeeper
tag: 3.8.1-debian-11-r31
tag: 3.8.1-debian-11-r36
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -660,7 +660,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r114
tag: 11-debian-11-r118
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -77,7 +77,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/kafka
tag: 3.4.0-debian-11-r33
tag: 3.4.1-debian-11-r0
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -781,7 +781,7 @@ externalAccess:
image:
registry: docker.io
repository: bitnami/kubectl
tag: 1.25.10-debian-11-r0
tag: 1.25.10-debian-11-r6
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1030,7 +1030,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r118
tag: 11-debian-11-r123
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -1112,7 +1112,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/kafka-exporter
tag: 1.6.0-debian-11-r89
tag: 1.7.0-debian-11-r4
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1348,7 +1348,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/jmx-exporter
tag: 0.18.0-debian-11-r21
tag: 0.18.0-debian-11-r27
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

View File

@ -6,7 +6,7 @@ annotations:
category: Database
licenses: Apache-2.0
apiVersion: v2
appVersion: 10.11.3
appVersion: 10.11.4
dependencies:
- name: common
repository: file://./charts/common
@ -30,4 +30,4 @@ maintainers:
name: mariadb
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/mariadb
version: 12.2.4
version: 12.2.5

View File

@ -84,7 +84,7 @@ The command removes all the Kubernetes components associated with the chart and
| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
| `image.registry` | MariaDB image registry | `docker.io` |
| `image.repository` | MariaDB image repository | `bitnami/mariadb` |
| `image.tag` | MariaDB image tag (immutable tags are recommended) | `10.11.3-debian-11-r5` |
| `image.tag` | MariaDB image tag (immutable tags are recommended) | `10.11.4-debian-11-r0` |
| `image.digest` | MariaDB image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | MariaDB image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@ -306,7 +306,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r118` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r123` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@ -320,7 +320,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image.registry` | Exporter image registry | `docker.io` |
| `metrics.image.repository` | Exporter image repository | `bitnami/mysqld-exporter` |
| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.14.0-debian-11-r119` |
| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.14.0-debian-11-r125` |
| `metrics.image.digest` | Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@ -543,7 +543,7 @@ kubectl delete statefulset opencart-mariadb --cascade=false
## License
Copyright &copy; 2023 Bitnami
Copyright &copy; 2023 VMware, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -87,7 +87,7 @@ serviceBindings:
image:
registry: docker.io
repository: bitnami/mariadb
tag: 10.11.3-debian-11-r5
tag: 10.11.4-debian-11-r0
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1001,7 +1001,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r118
tag: 11-debian-11-r123
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
@ -1037,7 +1037,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.14.0-debian-11-r119
tag: 0.14.0-debian-11-r125
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)

View File

@ -40,4 +40,4 @@ maintainers:
name: wordpress
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/wordpress
version: 16.1.12
version: 16.1.13

View File

@ -76,15 +76,15 @@ The command removes all the Kubernetes components associated with the chart and
### WordPress Image parameters
| Name | Description | Value |
| ------------------- | --------------------------------------------------------------------------------------------------------- | -------------------- |
| `image.registry` | WordPress image registry | `docker.io` |
| `image.repository` | WordPress image repository | `bitnami/wordpress` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.2.2-debian-11-r9` |
| `image.digest` | WordPress image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` |
| `image.pullSecrets` | WordPress image pull secrets | `[]` |
| `image.debug` | Specify if debug values should be set | `false` |
| Name | Description | Value |
| ------------------- | --------------------------------------------------------------------------------------------------------- | --------------------- |
| `image.registry` | WordPress image registry | `docker.io` |
| `image.repository` | WordPress image repository | `bitnami/wordpress` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.2.2-debian-11-r10` |
| `image.digest` | WordPress image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` |
| `image.pullSecrets` | WordPress image pull secrets | `[]` |
| `image.debug` | Specify if debug values should be set | `false` |
### WordPress Configuration parameters
@ -247,7 +247,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r122` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r123` |
| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
@ -279,7 +279,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` |
| `metrics.image.registry` | Apache exporter image registry | `docker.io` |
| `metrics.image.repository` | Apache exporter image repository | `bitnami/apache-exporter` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.13.4-debian-11-r1` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.13.4-debian-11-r2` |
| `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` |

View File

@ -73,7 +73,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/wordpress
tag: 6.2.2-debian-11-r9
tag: 6.2.2-debian-11-r10
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -763,7 +763,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r122
tag: 11-debian-11-r123
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -857,7 +857,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.13.4-debian-11-r1
tag: 0.13.4-debian-11-r2
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -3,7 +3,7 @@ dependencies:
repository: https://btp-charts-stable.s3.amazonaws.com/charts/
version: 0.1.3
- name: sawtooth
repository: https://btp-charts-stable.s3.amazonaws.com/charts/
version: 0.2.11
digest: sha256:871a184a07b100d0c58c845b6a3a70c08f4aeed7dc0ac1bcecd1dc1a910b9558
generated: "2023-03-06T19:05:19.640433Z"
repository: https://btp-charts-unstable.s3.amazonaws.com/charts/
version: 0.2.12
digest: sha256:780720dfac6408ac363acd252c6232b5a405368dda73dcbe3d2e208bbe0d75e8
generated: "2023-06-01T16:05:51.598742033-04:00"

View File

@ -3,13 +3,13 @@ annotations:
catalog.cattle.io/display-name: Chronicle
catalog.cattle.io/release-name: chronicle
apiVersion: v2
appVersion: 0.6.2
appVersion: 0.7.3
dependencies:
- name: standard-defs
repository: https://btp-charts-stable.s3.amazonaws.com/charts/
version: ~0.1.0
- name: sawtooth
repository: https://btp-charts-stable.s3.amazonaws.com/charts/
repository: https://btp-charts-unstable.s3.amazonaws.com/charts/
version: ~0.2.0
description: 'Chronicle is an open-source, blockchain-backed, domain-agnostic provenance
product. Chronicle makes it easy for users to record and query immutable provenance
@ -22,4 +22,4 @@ keywords:
- blockchain
name: chronicle
type: application
version: 0.1.14
version: 0.1.15

View File

@ -7,4 +7,4 @@ dependencies:
description: BTP's Sawtooth distribution based on Hyperledger Sawtooth 1.2
name: sawtooth
type: application
version: 0.2.11
version: 0.2.12

View File

@ -80,6 +80,7 @@ data:
GENESIS_NODE=$(get_genesis_node)
"${BIN_DIR}/upsert_cm.sh" validator-public "$NODE_NAME" "$(cat /etc/sawtooth/keys/validator.pub)"
"${BIN_DIR}/upsert_cm.sh" validator-secret "$NODE_NAME" "$(cat /etc/sawtooth/keys/validator.priv)"
if [ "$GENESIS_NODE" = "$NODE_NAME" ]; then
export RUN_GENESIS=1

View File

@ -30,6 +30,10 @@ chronicle: {{ include "common.names.fullname" . }}
{{ include "lib.call-nested" (list . "sawtooth" "sawtooth.ports.sawcomp") | int }}
{{- end -}}
{{- define "chronicle.sawtooth.rest" -}}
{{ include "lib.call-nested" (list . "sawtooth" "sawtooth.ports.rest") | int }}
{{- end -}}
{{- define "chronicle.sawtooth.service" -}}
{{- $svc := include "lib.call-nested" (list . "sawtooth" "common.names.fullname") -}}
{{- $ns := .Release.Namespace -}}
@ -42,3 +46,31 @@ chronicle: {{ include "common.names.fullname" . }}
{{- toYaml .Values.affinity }}
{{- end -}}
{{- end -}}
{{- define "chronicle.api.service" -}}
{{ include "chronicle.service.name" . }}-chronicle-api
{{- end -}}
{{- define "chronicle.id-provider.service" -}}
{{ include "common.names.fullname" . }}-test-id-provider
{{- end -}}
{{- define "chronicle.jwksUrl" -}}
{{- if .Values.auth.jwks.url -}}
{{ .Values.auth.jwks.url }}
{{- else -}}
{{- if .Values.devIdProvider.enabled -}}
http://{{ include "chronicle.id-provider.service" . }}:8090/jwks
{{- else -}}
{{ required "devIdProvider.enabled must be true or auth.jwks.url must be set!" .Values.auth.jwks.url }}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "chronicle.userinfoUrl" -}}
{{ .Values.auth.userinfo.url }}
{{- end -}}
{{- define "chronicle.root-key.secret" -}}
{{ include "common.names.fullname" . }}-root-key
{{- end -}}

View File

@ -0,0 +1,228 @@
{{$stlServiceName := include "lib.call-nested" (list . "sawtooth" "common.names.fullname")}}
---
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
name: {{ include "common.names.fullname" . }}-init
labels: {{ include "chronicle.labels" . | nindent 4 }}
component: chronicle
spec:
template:
metadata:
labels: {{ include "chronicle.labels" . | nindent 8 }}
component: chronicle
spec:
restartPolicy: Never
serviceAccountName: {{ include "lib.serviceAccountName" . }}
automountServiceAccountToken: true
volumes: {{- include "lib.volumes" .Values.opa.tp.extraVolumes | nindent 8 }}
- name: shared-data
emptyDir: {}
initContainers:
- name: get-secret
image: alpine/k8s:1.24.13
command: [ "sh", "-ec" ]
args:
- |
if kubectl get secret {{ include "chronicle.root-key.secret" . }} -n {{.Release.Namespace}} >/dev/null 2>&1; then
echo "Secret found."
kubectl get secret {{ include "chronicle.root-key.secret" . }} -n {{.Release.Namespace}} -o jsonpath='{.data.*}' | base64 -d > /shared-data/root.pem
touch /shared-data/secret-found
else
echo "Secret not found."
fi
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: generate-secret
{{- include "lib.image" (dict "imageRoot" .Values.opa.opaInit.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-ec"]
args:
- |
if [[ ! -f "/shared-data/root.pem" ]]; then
echo "Generating new root key."
opactl generate --output /shared-data/root.pem
else
echo "Root key already exists."
fi
env: {{ include "lib.safeToYaml" .Values.env | nindent 12 }}
- name: RUST_LOG
value: {{ .Values.logLevel }}
- name: RUST_BACKTRACE
value: {{ .Values.backtraceLevel }}
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: create-secret
image: alpine/k8s:1.24.13
command: [ "sh", "-ec" ]
args:
- |
if [ -f "/shared-data/secret-found" ]; then
echo "Secret already exists."
else
echo "Creating k8s secret from key."
kubectl create secret generic {{ include "chronicle.root-key.secret" . }} \
-n {{ .Release.Namespace }} \
--from-file=/shared-data/root.pem
fi
volumeMounts:
- name: shared-data
mountPath: /shared-data
{{ if .Values.opa.enabled }}
- name: opa-bootstrap-root
{{- include "lib.image" (dict "imageRoot" .Values.opa.opaInit.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-ec"]
args:
- |
wait-for-it $HOST:$PORT --timeout=0
echo "Waiting to ensure Sawtooth validator is ready ..."
sleep 100
if [[ -f "/shared-data/secret-found" ]]; then
echo "Skipping root key bootstrap."
else
opactl \
--sawtooth-address tcp://$HOST:$PORT \
bootstrap \
--root-key /shared-data/root.pem
fi
env: {{ include "lib.safeToYaml" .Values.env | nindent 12 }}
- name: HOST
value: {{ $stlServiceName }}.{{ .Release.Namespace }}.svc.cluster.local
- name: PORT
value: "{{ include "chronicle.sawtooth.sawcomp" . }}"
- name: RUST_LOG
value: {{ .Values.logLevel }}
- name: RUST_BACKTRACE
value: {{ .Values.backtraceLevel }}
volumeMounts:
- name: shared-data
mountPath: /shared-data
{{ if .Values.opa.policy.url }}
- name: wait-for-sawtooth-rest-api
{{- include "lib.image" (dict "imageRoot" .Values.opa.opaInit.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-ec"]
args:
- |
wait-for-it $HOST:$PORT --timeout=0
echo "Sawtooth rest API is ready."
env:
- name: HOST
value: {{ $stlServiceName }}.{{ .Release.Namespace }}.svc.cluster.local
- name: PORT
value: "{{ include "chronicle.sawtooth.rest" . }}"
- name: RUST_LOG
value: {{ .Values.logLevel }}
- name: RUST_BACKTRACE
value: {{ .Values.backtraceLevel }}
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: opa-settings
{{- include "lib.image" (dict "imageRoot" .Values.sawset.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-ec"]
args:
- |
if sawtooth settings list --url http://$HOST:$PORT | grep -q "chronicle.opa.policy_name"; then
echo "Skipping setting Sawtooth OPA settings."
exit 0
else
echo "Creating Sawtooth settings batch."
sawset proposal create \
-k /etc/sawtooth/keys/{{ $stlServiceName }}-0 \
chronicle.opa.policy_name={{ required "opa.policy.id required!" .Values.opa.policy.id }} \
chronicle.opa.entrypoint={{ required "opa.policy.entrypoint required!" .Values.opa.policy.entrypoint }} \
-o /shared-data/opa-settings.batch
echo "Submitting Sawtooth OPA settings batch."
sawtooth batch submit \
-f /shared-data/opa-settings.batch \
--url http://$HOST:$PORT \
--wait 60
fi
env:
- name: HOST
value: {{ $stlServiceName }}.{{ .Release.Namespace }}.svc.cluster.local
- name: PORT
value: "{{ include "chronicle.sawtooth.rest" . }}"
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: validator-secret
mountPath: /etc/sawtooth/keys
readOnly: true
- name: get-policy
{{- include "lib.image" (dict "imageRoot" .Values.opa.opaInit.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-ec"]
args:
- |
if opactl \
--sawtooth-address tcp://$HOST:$PORT \
get-policy \
--id {{ .Values.opa.policy.id }} \
--output policy.bin >/dev/null 2>&1; then
echo "Policy already set."
touch /shared-data/policy-already-set
else
echo "Policy not found."
exit 0
fi
env: {{ include "lib.safeToYaml" .Values.env | nindent 12 }}
- name: HOST
value: {{ $stlServiceName }}.{{ .Release.Namespace }}.svc.cluster.local
- name: PORT
value: "{{ include "chronicle.sawtooth.sawcomp" . }}"
- name: RUST_LOG
value: {{ .Values.logLevel }}
- name: RUST_BACKTRACE
value: {{ .Values.backtraceLevel }}
volumeMounts:
- name: shared-data
mountPath: /shared-data
- name: set-policy
{{- include "lib.image" (dict "imageRoot" .Values.opa.opaInit.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-ec"]
args:
- |
if [[ -f "/shared-data/policy-already-set" ]]; then
echo "Skipping setting policy."
exit 0
else
echo "Policy not found on chain. Setting policy."
opactl \
--sawtooth-address tcp://$HOST:$PORT \
set-policy \
--id {{ .Values.opa.policy.id }} \
-p {{ .Values.opa.policy.url }} \
--root-key /shared-data/root.pem
fi
env: {{ include "lib.safeToYaml" .Values.env | nindent 12 }}
- name: HOST
value: {{ $stlServiceName }}.{{ .Release.Namespace }}.svc.cluster.local
- name: PORT
value: "{{ include "chronicle.sawtooth.sawcomp" . }}"
- name: RUST_LOG
value: {{ .Values.logLevel }}
- name: RUST_BACKTRACE
value: {{ .Values.backtraceLevel }}
volumeMounts:
- name: shared-data
mountPath: /shared-data
{{ end }}
{{ end }}
containers:
- name: chronicle-init
image: busybox:1.36
command: [ "sh", "-c"]
args:
- |
echo "Chronicle bootstrap and OPA settings initialization complete."
volumes:
- name: shared-data
emptyDir: {}
- name: validator-secret
configMap:
name: validator-secret

View File

@ -0,0 +1,20 @@
{{- if .Values.devIdProvider.enabled }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "chronicle.id-provider.service" . }}
labels: {{ include "chronicle.labels" . | nindent 4 }}
component: test-id-provider
spec:
type: ClusterIP
clusterIP: None
sessionAffinity: ClientIP
ports:
- port: 8090
protocol: TCP
targetPort: 8090
name: {{ include "chronicle.id-provider.service" . }}
selector: {{ include "chronicle.labels.matchLabels" . | nindent 4 }}
component: test-id-provider
{{- end }}

View File

@ -0,0 +1,28 @@
{{- if .Values.devIdProvider.enabled }}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "chronicle.id-provider.service" . }}
labels: {{ include "chronicle.labels" . | nindent 4 }}
component: test-id-provider
spec:
selector:
matchLabels: {{ include "chronicle.labels.matchLabels" . | nindent 6 }}
component: test-id-provider
serviceName: {{ include "chronicle.id-provider.service" . }}
template:
metadata:
labels: {{ include "chronicle.labels" . | nindent 8 }}
component: test-id-provider
spec:
serviceAccountName: {{ include "lib.serviceAccountName" . }}
affinity: {{ include "lib.safeToYaml" .Values.affinity | nindent 8 }}
containers:
- name: id-provider
{{- include "lib.image" (dict "imageRoot" .Values.devIdProvider.image "global" .Values.global ) | nindent 10 }}
ports:
- name: jwks
containerPort: 8090
protocol: TCP
{{- end }}

View File

@ -1,4 +1,4 @@
{{- $serviceName := (include "common.names.fullname" . ) -}}
{{- $serviceName := (include "chronicle.api.service" . ) -}}
{{- $ingressName := printf "%s" $serviceName -}}
{{- $servicePort := .Values.port | int -}}
{{ include "lib.ingress" (dict "ingressName" $ingressName "ingress" .Values.ingress "serviceName" $serviceName "servicePort" $servicePort "context" $) }}

View File

@ -0,0 +1,20 @@
{{- if .Values.opa.enabled }}
{{- $ctx := . -}}
{{ range untilStep 0 ((include "tp.replicas" $ctx) | int) 1 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "common.names.fullname" $ctx }}-opa-tp-{{ . }}
labels: {{- include "lib.labels" $ctx | nindent 4 }}
per-node: {{ include "common.names.fullname" $ctx }}-{{ . }}
component: opa-tp
spec:
type: ClusterIP
clusterIP: None
sessionAffinity: ClientIP
selector: {{- include "common.labels.matchLabels" $ctx | nindent 4 }}
component: opa-tp
statefulset.kubernetes.io/pod-name: {{ include "common.names.fullname" $ctx }}-{{ . }}
{{- end -}}
{{- end }}

View File

@ -0,0 +1,28 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "common.names.fullname" . }}-secrets-role
namespace: {{.Release.Namespace}}
rules:
- apiGroups:
- "" # "" refers to the core API group
resources:
- secrets
verbs:
- create
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "common.names.fullname" . }}-secrets-role-rb
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "common.names.fullname" . }}-secrets-role
subjects:
- kind: ServiceAccount
name: {{ include "lib.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}

View File

@ -2,12 +2,11 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "chronicle.service.name" . }}-chronicle-api
name: {{ include "chronicle.api.service" . }}
labels: {{ include "chronicle.labels" . | nindent 4 }}
component: chronicle
spec:
type: ClusterIP
clusterIP: None
sessionAffinity: ClientIP
ports:
- port: {{ .Values.port }}

View File

@ -1,3 +1,4 @@
{{$stlServiceName := include "lib.call-nested" (list . "sawtooth" "common.names.fullname")}}
---
apiVersion: apps/v1
kind: StatefulSet
@ -48,6 +49,27 @@ spec:
- name: chronicle-secrets
mountPath: /var/lib/chronicle/secrets/
readOnly: false
{{- if and .Values.opa.enabled .Values.opa.policy.url }}
- name: wait-for-opa-settings
{{- include "lib.image" (dict "imageRoot" .Values.sawset.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-exc"]
args:
- |
keepTrying=true
while [ $keepTrying = "true" ]; do
if sawtooth settings list --url http://$HOST:$PORT | grep -q "chronicle.opa.policy_name"; then
break
else
echo "Waiting for OPA policy id."
sleep 10
fi
done
env:
- name: HOST
value: {{ $stlServiceName }}.{{ .Release.Namespace }}.svc.cluster.local
- name: PORT
value: "{{ include "chronicle.sawtooth.rest" . }}"
{{- end }}
containers:
{{- if .Values.postgres.enabled }}
- name: postgres
@ -80,17 +102,38 @@ spec:
command: [ "bash", "-c"]
args:
- |
{{ if .Values.auth.required }}
{{ if and (not .Values.auth.jwks.url) (not .Values.auth.userinfo.url) (not .Values.devIdProvider.enabled) }}
{{ required "If auth.required you need to provide at least auth.jwks.url or auth.userinfo.url" .Values.auth.jwks.url }}
{{ end }}
{{ end }}
echo "Waiting 20 seconds for postgres to start";
sleep 20;
/usr/local/bin/chronicle \
-c /etc/chronicle/config/config.toml \
--console-logging json \
--sawtooth tcp://{{ include "chronicle.sawtooth.service" . }}:{{ include "chronicle.sawtooth.sawcomp" . }} \
--remote-database \
--database-name {{ .Values.postgres.database }} \
--database-username {{ .Values.postgres.user }} \
--database-host {{ .Values.postgres.host }} \
serve-graphql --interface 0.0.0.0:{{ .Values.port}} {{ if .Values.webUi }} --open {{ end }}
chronicle \
-c /etc/chronicle/config/config.toml \
--console-logging json \
--sawtooth tcp://{{ include "chronicle.sawtooth.service" . }}:{{ include "chronicle.sawtooth.sawcomp" . }} \
--remote-database \
--database-name {{ .Values.postgres.database }} \
--database-username {{ .Values.postgres.user }} \
--database-host {{ .Values.postgres.host }} \
{{- if not .Values.opa.enabled }}
--embedded-opa-policy \
{{- end }}
serve-api \
--interface 0.0.0.0:{{ .Values.port}} \
{{- if .Values.auth.required }}
--require-auth \
--id-claims {{ .Values.auth.id.claims }} \
{{- if .Values.auth.jwks.enabled }}
--jwks-address {{ include "chronicle.jwksUrl" . }} \
{{- end }}
{{- if .Values.auth.userinfo.url }}
--userinfo-address {{ include "chronicle.userinfoUrl" . }} \
{{- end }}
{{- end }}
;
env: {{ include "lib.safeToYaml" .Values.env | nindent 12 }}
- name: RUST_LOG
value: {{ .Values.logLevel }}

View File

@ -0,0 +1,37 @@
{{- if .Values.test.enabled }}
{{- if .Values.auth.required }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "common.names.fullname" . }}-test-token-getter-role
namespace: {{.Release.Namespace}}
rules:
- apiGroups:
- "" # "" refers to the core API group
resources:
- pods/exec
verbs:
- create
- apiGroups:
- "" # "" refers to the core API group
resources:
- pods
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "common.names.fullname" . }}-test-token-getter-rb
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "common.names.fullname" . }}-test-token-getter-role
subjects:
- kind: ServiceAccount
name: {{ include "lib.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}

View File

@ -1,3 +1,4 @@
{{- if .Values.test.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
@ -8,16 +9,87 @@ metadata:
"helm.sh/hook": test
"helm.sh/hook-delete-policy": hook-succeeded
spec:
backoffLimit: 0
template:
spec:
restartPolicy: Never
serviceAccountName: {{ include "lib.serviceAccountName" . }}
automountServiceAccountToken: true
initContainers:
{{- if .Values.auth.required }}
- name: wait-for-id-provider
{{- include "lib.image" (dict "imageRoot" .Values.test.api.image "global" .Values.global ) | nindent 10 }}
command: [ "sh", "-ec" ]
args:
- |
URL={{ include "chronicle.jwksUrl" . }}
scheme=$(echo $URL | cut -f 1 -d :)
hostAndPort=$(echo $URL | cut -f 3 -d /)
HOST=$(echo $hostAndPort | cut -f 1 -d :)
port=$(echo $hostAndPort | awk -F: '{print $2}')
if [ "$scheme" = "http" ]; then
defaultPort=80
elif [ "$scheme" = "https" ]; then
defaultPort=443
else
defaultPort=80
fi
PORT=${port:-$defaultPort}
echo "Waiting for id-provider to be ready ..."
wait-for-it $HOST:$PORT --timeout=0
echo "Id-provider is ready. Exiting."
- name: token-loader
image: alpine/k8s:1.24.13
command: [ "sh", "-ec" ]
args:
- |
echo "Waiting to ensure id-provider is ready ..."
sleep 20
echo "Getting token from id-provider ..."
kubectl exec {{ include "chronicle.id-provider.service" . }}-0 -c id-provider -- oauth-token > /shared-data/jwks-token
echo "Token loaded. Exiting."
volumeMounts:
- name: shared-data
mountPath: /shared-data
{{- end }}
containers:
- name: test
{{- include "lib.image" (dict "imageRoot" .Values.test.image "global" .Values.global ) | nindent 10 }}
command: {{ .Values.test.command }}
{{- include "lib.image" (dict "imageRoot" .Values.test.api.image "global" .Values.global ) | nindent 10 }}
command: [ "sh", "-ec" ]
args:
- |
API={{ include "chronicle.api.service" . }}
export PORT={{ .Values.port }}
echo "Waiting for API to be ready ..."
wait-for-it $API:$PORT --timeout=0
echo "Getting IP address for API ..."
getent hosts $API | cut -f 1 -d \ | head -n 1 > /shared-data/api-ip || exit 1
{{- if .Values.auth.required }}
if [ -f "/shared-data/jwks-token" ]; then
echo "Found token."
sleep 5
export TOKEN=$(cat "/shared-data/jwks-token")
fi
{{- end }}
export HOST=$(cat /shared-data/api-ip)
echo "Testing API with subscribe-submit-test..."
subscribe-submit-test
exit_code=$?
if [ $exit_code -eq 0 ]; then
echo "Test complete."
exit $exit_code
else
echo "Test failed."
exit $exit_code
fi
env:
- name: HOST
value: {{ include "chronicle.service.name" . }}-chronicle-api
- name: PORT
value: {{ .Values.port | quote }}
serviceAccountName: {{ include "lib.serviceAccountName" . }}
restartPolicy: Never
- name: REQUIRE_AUTH
value: {{ .Values.auth.required | quote }}
volumeMounts:
- name: shared-data
mountPath: /shared-data
volumes: {{- include "lib.volumes" .Values.opa.tp.extraVolumes | nindent 8 }}
- name: shared-data
emptyDir: {}
{{- end }}

View File

@ -1,11 +1,10 @@
---
{{$stlServiceName := include "lib.call-nested" (list . "sawtooth" "common.names.fullname")}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "common.names.fullname" . }}-tp
labels: {{ include "chronicle.labels" . | nindent 4 }}
component: tp
component: chronicle-tp
spec:
replicas: {{ include "tp.replicas" . }}
serviceName: {{ include "common.names.fullname" . }}-tp
@ -34,10 +33,11 @@ spec:
args:
- |
HOST={{ $stlServiceName }}-${HOST##*-}.{{ .Release.Namespace }}.svc.cluster.local
echo tcp://$HOST:{{ include "chronicle.sawtooth.sawcomp" . }} &&
PORT={{ include "chronicle.sawtooth.sawcomp" . }}
echo tcp://$HOST:$PORT &&
/usr/local/bin/chronicle_sawtooth_tp \
--console-logging json \
--connect tcp://$HOST:{{ include "chronicle.sawtooth.sawcomp" . }}
--connect tcp://$HOST:$PORT
resources: {{- include "lib.safeToYaml" .Values.tp.resources | nindent 12 }}
env:
- name: RUST_LOG
@ -48,4 +48,30 @@ spec:
fieldPath: metadata.name
{{- include "lib.safeToYaml" .Values.tp.env | nindent 12 }}
volumeMounts: {{- include "lib.volumeMounts" .Values.tp.extraVolumeMounts | nindent 12 }}
{{- if .Values.opa.enabled }}
- name: opa-tp
{{- include "lib.image" (dict "imageRoot" .Values.opa.tp.image "global" .Values.global ) | nindent 10 }}
command: [ "bash", "-xc"]
args:
- |
HOST={{ $stlServiceName }}-${HOST##*-}.{{ .Release.Namespace }}.svc.cluster.local
PORT={{ include "chronicle.sawtooth.sawcomp" . }}
wait-for-it $HOST:$PORT --timeout=0
echo tcp://$HOST:$PORT &&
/usr/local/bin/opa-tp \
-C tcp://$HOST:$PORT \
--console-logging json
resources: {{- include "lib.safeToYaml" .Values.opa.tp.resources | nindent 12 }}
env: {{ include "lib.safeToYaml" .Values.env | nindent 12 }}
- name: RUST_LOG
value: {{ .Values.logLevel }}
- name: RUST_BACKTRACE
value: {{ .Values.backtraceLevel }}
- name: HOST
valueFrom:
fieldRef:
fieldPath: metadata.name
{{- include "lib.safeToYaml" .Values.tp.env | nindent 12 }}
volumeMounts: {{- include "lib.volumeMounts" .Values.opa.tp.extraVolumeMounts | nindent 12 }}
{{- end }}
volumes: {{- include "lib.volumes" .Values.tp.extraVolumes | nindent 8 }}

View File

@ -8,59 +8,52 @@ global:
image:
tag:
imagePullSecrets:
## @md | `imagePullSecrets.enabled`| if true use the list of named imagePullSecrets | false |
enabled: false
  ## @md | `imagePullSecrets.value`| a list of named secret references of the form `- name: secretName`| [] |
value: []
image:
## @md | `image.repository` | the repository of the image | blockchaintp/chronicle |
repository: blockchaintp/chronicle-amd64
## @md | `image.tag`| the tag of the image to use | latest |
tag: BTP2.1.0-0.6.2
## @md | `image.pullPolicy` | the image pull policy to use | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `logLevel` | log level for chronicle | info |
logLevel: info
## @md | `webUi` | If true serve the graphql playground interface | false |
webUi: false
## @md | `replicas` | number of Chronicle replicas to run | 1 |
replicas: 1
volumes: {}
## @md | `affinity`| custom affinity rules for the chronicle pod | {} |
affinity: {}
test:
## @md | `test.command` | the command to run in the api-test container | ["/usr/local/bin/subscribe-submit-compare"] |
command: ["/usr/local/bin/subscribe-submit-test"]
## @md | `api-test-container.image` | the image to use for the api-test container | blockchaintp/chronicle-api-test |
auth:
## @md | `auth.required` | if true require authentication | false |
required: false
id:
claims: email
jwks:
enabled: true
url:
userinfo:
url:
## @md | `backtraceLevel` | backtrace level for Chronicle | nil |
backtraceLevel: full
devIdProvider:
enabled: true
## @md | `devIdProvider.image` | the image to use for the id-provider container | blockchaintp/id-provider |
image:
## @md | `test.image.pullPolicy` | the image pull policy | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `test.image.repository` | the image repository | blockchaintp/chronicle-helm-api-test |
repository: blockchaintp/chronicle-helm-api-test
## @md | `test.image.tag` | the image tag | latest |
tag: latest
## @md | `devIdProvider.image.pullPolicy` | the image pull policy | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `devIdProvider.image.repository` | the image repository | blockchaintp/id-provider |
repository: blockchaintp/id-provider-amd64
## @md | `devIdProvider.image.tag` | the image tag | latest |
tag: BTP2.1.0-0.7.3
## @md | `extraVolumes` | a list of additional volumes to add to chronicle | [] |
extraVolumes: []
## @md | `extraVolumeMounts` | a list of additional volume mounts to add to chronicle | [] |
extraVolumeMounts: []
## @md | `port` | the port on which the chronicle service listens | 9982 |
port: 9982
image:
## @md | `image.repository` | the repository of the image | blockchaintp/chronicle |
repository: blockchaintp/chronicle-amd64
## @md | `image.tag`| the tag of the image to use | latest |
tag: BTP2.1.0-0.7.3
## @md | `image.pullPolicy` | the image pull policy to use | IfNotPresent |
pullPolicy: IfNotPresent
serviceAccount:
## @md | `serviceAccount.create` | true to create a service account | false |
create: true
## @md | `serviceAccount.name` | name of the service account | nil (defaults to based on release name) |
name:
imagePullSecrets:
## @md | `imagePullSecrets.enabled`| if true use the list of named imagePullSecrets | false |
enabled: false
  ## @md | `imagePullSecrets.value`| a list of named secret references of the form `- name: secretName`| [] |
value: []
ingress:
## @md | `ingress.apiVersion` | if necessary the apiVersion of the ingress may be overridden | "" |
@ -91,34 +84,64 @@ ingress:
# paths:
# - /
## @md | `sawtooth` | sawtooth options may be configured | see [Sawtooth](../sawtooth/README.md) |
sawtooth:
sawtooth:
statefulset:
enabled: true
consensus: 400
## @md | `logLevel` | log level for Chronicle | info |
logLevel: info
tp:
## @md | `tp.args` | a string of arguments to pass to the tp container| nil |
args:
image:
## @md | `tp.image.pullPolicy` | the image pull policy | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `tp.image.repository` | the image repository | blockchaintp/chronicle-tp |
repository: blockchaintp/chronicle-tp-amd64
## @md | `tp.image.tag` | the image tag | BTP2.1.0 |
tag: BTP2.1.0-0.6.2
## @md | `tp.extraVolumes` | extra volumes declarations for the chronicle-tp deployment | list | nil
extraVolumes:
## @md | `tp.extraVolumeMounts` | extra volume mounts for chronicle-tp deployment | list | nil
extraVolumeMounts:
## @md | `tp.resources` | resources | map | nil |
resources:
## @md | `tp.maxUnavailable` | maximum unavailable nodes during a rolling upgrade |
maxUnavailable: 1
## @md | `tp.minReadySeconds` | minimum time before node becomes available |
minReadySeconds: 0
logLevel: info
opa:
## @md | `opa.enabled` | if true set up a full OPA enabled setup | true |
enabled: true
opaInit:
    ## @md | `opa.opaInit.image` | the image to use for the chronicle-init container | blockchaintp/chronicle-opa-init |
image:
## @md | `image.pullPolicy` | the image pull policy to use | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `image.repository` | the repository of the image | blockchaintp/chronicle |
repository: blockchaintp/opactl-amd64
## @md | `image.tag`| the tag of the image to use | latest |
tag: BTP2.1.0-0.7.3
policy:
entrypoint: allow_transactions.allowed_users
id: allow_transactions
url: file:///app/policies/bundle.tar.gz
tp:
image:
## @md | `image.repository` | the repository of the image | blockchaintp/chronicle |
repository: blockchaintp/opa-tp-amd64
## @md | `image.tag`| the tag of the image to use | latest |
tag: BTP2.1.0-0.7.3
## @md | `image.pullPolicy` | the image pull policy to use | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `opa.tp.resources` | resources | map | nil |
resources:
    ## @md | `opa.tp.extraVolumes` | extra volume declarations for the opa-tp deployment | list | nil
extraVolumes:
## @md | `opa.tp.extraVolumeMounts` | extra volume mounts for opa-tp deployment | list | nil
extraVolumeMounts:
## @md | `port` | the port on which the chronicle service listens | 9982 |
port: 9982
## @md | `replicas` | number of Chronicle replicas to run | 1 |
replicas: 1
serviceAccount:
## @md | `serviceAccount.create` | true to create a service account | false |
create: true
## @md | `serviceAccount.name` | name of the service account | nil (defaults to based on release name) |
name:
test:
## @md | `test.enabled` | true to enable test Jobs and Services | true |
enabled: true
api:
    ## @md | `test.api.image` | the image to use for the api-test container | blockchaintp/chronicle-api-test |
image:
## @md | `test.api.image.pullPolicy` | the image pull policy | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `test.api.image.repository` | the image repository | blockchaintp/chronicle-helm-api-test |
repository: blockchaintp/chronicle-helm-api-test-amd64
## @md | `test.api.image.tag` | the image tag | latest |
tag: BTP2.1.0-0.7.3
postgres:
# if enabled we allocate a postgres database here
@ -169,3 +192,43 @@ postgres:
## @md | `resources` | resources | map | nil |
resources:
sawset:
image:
## @md | `sawset.image.pullPolicy` | the image pull policy | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `sawset.image.repository` | the image repository | blockchaintp/sawtooth-validator |
repository: blockchaintp/sawtooth-validator
## @md | `sawset.image.tag` | the image tag | latest |
tag: BTP2.1.0
tp:
## @md | `tp.args` | a string of arguments to pass to the tp container| nil |
args:
image:
## @md | `tp.image.pullPolicy` | the image pull policy | IfNotPresent |
pullPolicy: IfNotPresent
## @md | `tp.image.repository` | the image repository | blockchaintp/chronicle-tp |
repository: blockchaintp/chronicle-tp-amd64
## @md | `tp.image.tag` | the image tag | latest |
tag: BTP2.1.0-0.7.3
  ## @md | `tp.extraVolumes` | extra volume declarations for the chronicle-tp deployment | list | nil
extraVolumes:
## @md | `tp.extraVolumeMounts` | extra volume mounts for chronicle-tp deployment | list | nil
extraVolumeMounts:
## @md | `tp.resources` | resources | map | nil |
resources:
## @md | `tp.maxUnavailable` | maximum unavailable nodes during a rolling upgrade |
maxUnavailable: 1
## @md | `tp.minReadySeconds` | minimum time before node becomes available |
minReadySeconds: 0
logLevel: info
volumes: {}
## @md | `sawtooth` | sawtooth options may be configured | see [Sawtooth](../sawtooth/README.md) |
sawtooth:
sawtooth:
consensus: 400
statefulset:
enabled: true

View File

@ -1,6 +1,6 @@
dependencies:
- name: crate-operator-crds
repository: file://../crate-operator-crds
version: 2.28.0
digest: sha256:ad0c820982449b343f2c2810bec1d95743849606aad34b9b942a57558f17761b
generated: "2023-06-05T08:38:25.444839244Z"
version: 2.29.0
digest: sha256:1c8dbfe1f82cd4423cbf52c9160810973e7d90ce7a7be7ff3e7e77809d21e324
generated: "2023-06-07T09:26:56.453741789Z"

View File

@ -3,16 +3,16 @@ annotations:
catalog.cattle.io/display-name: CrateDB Operator
catalog.cattle.io/release-name: crate-operator
apiVersion: v2
appVersion: 2.28.0
appVersion: 2.29.0
dependencies:
- condition: crate-operator-crds.enabled
name: crate-operator-crds
repository: file://./charts/crate-operator-crds
version: 2.28.0
version: 2.29.0
description: Crate Operator - Helm chart for installing and upgrading Crate Operator.
icon: https://raw.githubusercontent.com/crate/crate/master/docs/_static/crate-logo.svg
maintainers:
- name: Crate.io
name: crate-operator
type: application
version: 2.28.0
version: 2.29.0

View File

@ -1,9 +1,9 @@
apiVersion: v2
appVersion: 2.28.0
appVersion: 2.29.0
description: Crate Operator CRDs - Helm chart for installing and upgrading Custom
Resource Definitions (CRDs) for the Crate Operator.
maintainers:
- name: Crate.io
name: crate-operator-crds
type: application
version: 2.28.0
version: 2.29.0

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>= 1.19.0-0'
catalog.cattle.io/release-name: vals-operator
apiVersion: v2
appVersion: v0.7.3
appVersion: v0.7.4
description: This helm chart installs the Digitalis Vals Operator to manage and sync
secrets from supported backends into Kubernetes.
icon: https://digitalis.io/wp-content/uploads/2020/06/cropped-Digitalis-512x512-Blue_Digitalis-512x512-Blue-32x32.png
@ -14,4 +14,4 @@ maintainers:
name: Digitalis.IO
name: vals-operator
type: application
version: 0.7.3
version: 0.7.4

View File

@ -55,6 +55,8 @@ spec:
{{- end }}
ports:
- containerPort: {{ .Values.metricsPort | default 8080 }}
name: metrics
protocol: TCP
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}

View File

@ -0,0 +1,20 @@
{{- if .Values.podMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ include "vals-operator.fullname" . }}
labels:
{{- include "vals-operator.labels" . | nindent 4 }}
spec:
podMetricsEndpoints:
- interval: 30s
port: "metrics"
path: "/metrics"
namespaceSelector:
matchNames:
- "{{ .Release.Namespace }}"
selector:
matchLabels:
app.kubernetes.io/name: {{ include "vals-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,40 @@
{{- if .Values.prometheusRules.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "vals-operator.fullname" . }}
labels:
{{- include "vals-operator.labels" . | nindent 4 }}
spec:
groups:
- name: vals-operator
rules:
{{- if .Values.enableDbSecrets }}
- alert: ValsOperatorDbSecretError
expr: vals_operator_dbsecret_error > time() - 300
for: 30m
labels:
severity: warning
annotations:
summary: vals-operator database secret not issued
description: "Vals operator has been unable to issue database credentials to {{`{{`}}$labels.secret{{`}}`}} in namespace {{`{{`}}$labels.namespace{{`}}`}}"
{{- if .Values.prometheusRules.additionalRuleLabels }}
{{- with .Values.prometheusRules.additionalRuleLabels }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- end }}
- alert: ValsOperatorSecretError
expr: vals_operator_secret_error > time() - 300
for: 30m
labels:
severity: warning
annotations:
summary: vals-operator secret not issued
description: "Vals operator has been unable to create the secret for {{`{{`}}$labels.secret{{`}}`}} in namespace {{`{{`}}$labels.namespace{{`}}`}}"
{{- if .Values.prometheusRules.additionalRuleLabels }}
{{- with .Values.prometheusRules.additionalRuleLabels }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,37 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "vals-operator.fullname" . }}
labels:
{{- if .Values.serviceMonitor.labels }}
{{ toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- else }}
app: {{ template "vals-operator.name" . }}
chart: {{ template "vals-operator.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ .Values.serviceMonitor.namespace }}
{{- end }}
spec:
endpoints:
- targetPort: "metrics"
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
{{- if .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}
path: /metrics
port: {{ .Values.metricsPort | default 8080 }}
tlsConfig:
insecureSkipVerify: true
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "vals-operator.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -25,6 +25,14 @@ manageCrds: true
# which may not be desired on secure environments
enableDbSecrets: true
prometheusRules:
enabled: false
## Additional labels for PrometheusRule alerts
additionalRuleLabels: {}
## Additional annotations for PrometheusRule alerts
additionalRuleAnnotations: {}
# additional arguments to operator
args: []
# -exclude-namespaces string
@ -88,13 +96,12 @@ securityContext: {}
# runAsNonRoot: true
# runAsUser: 1000
metricsPort: 8080
serviceMonitor:
# When set to true then use a ServiceMonitor to collect metrics
podMonitor:
# When set to true then use a podMonitor to collect metrics
enabled: false
# Custom labels to use in the ServiceMonitor to be matched with a specific Prometheus
# Custom labels to use in the podMonitor to be matched with a specific Prometheus
labels: {}
# Set the namespace the ServiceMonitor should be deployed to
# Set the namespace the podMonitor should be deployed to
# namespace: default
# Set how frequently Prometheus should scrape
# interval: 30s

View File

@ -4,9 +4,9 @@ annotations:
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: intel-device-plugins-operator
apiVersion: v2
appVersion: 0.26.0
appVersion: 0.26.1
description: A Helm chart for Intel Device Plugins Operator for Kubernetes
icon: https://avatars.githubusercontent.com/u/17888862?s=200&v=4
name: intel-device-plugins-operator
type: application
version: 0.26.0
version: 0.26.1

View File

@ -398,6 +398,16 @@ spec:
selector:
control-plane: controller-manager
---
{{- if .Values.privateRegistry.registrySecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-operator-private-registry
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: {{ printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registrySecret | b64enc) | b64enc }}
{{- end }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
@ -415,6 +425,10 @@ spec:
labels:
control-plane: controller-manager
spec:
{{- if .Values.privateRegistry.registrySecret }}
imagePullSecrets:
- name: {{ .Release.Name }}-operator-private-registry
{{- end }}
containers:
- args:
{{- if .Values.controllerExtraArgs }}
@ -427,8 +441,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "{{ .Values.image.hub }}/intel-deviceplugin-operator:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
image: "{{ .Values.manager.image.hub }}/intel-deviceplugin-operator:{{ .Values.manager.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.manager.image.pullPolicy }}
name: manager
ports:
- containerPort: 9443
@ -437,10 +451,10 @@ spec:
resources:
limits:
cpu: 100m
memory: 50Mi
memory: 120Mi
requests:
cpu: 100m
memory: 20Mi
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
@ -462,7 +476,7 @@ spec:
- --logtostderr=true
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- --v=10
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
image: "{{ .Values.kubeRbacProxy.image.hub }}/{{ .Values.kubeRbacProxy.image.hubRepo }}/kube-rbac-proxy:{{ .Values.kubeRbacProxy.image.tag }}"
name: kube-rbac-proxy
ports:
- containerPort: 8443

View File

@ -1,4 +1,17 @@
image:
hub: intel
tag: ""
pullPolicy: IfNotPresent
manager:
image:
hub: intel
tag: ""
pullPolicy: IfNotPresent
kubeRbacProxy:
image:
hub: gcr.io
hubRepo: kubebuilder
tag: v0.14.1
pullPolicy: IfNotPresent
privateRegistry:
registryUrl: ""
registryUser: ""
registrySecret: ""

View File

@ -4,9 +4,9 @@ annotations:
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: intel-device-plugins-qat
apiVersion: v2
appVersion: 0.26.0
appVersion: 0.26.1
description: A Helm chart for Intel QAT Device Plugin
icon: https://avatars.githubusercontent.com/u/17888862?s=200&v=4
name: intel-device-plugins-qat
type: application
version: 0.26.0
version: 0.26.1

View File

@ -0,0 +1,6 @@
questions:
- variable: nodeFeatureRule
default: false
type: boolean
label: Enable Node Feature Discovery feature labels
description: "When Node Feature Discovery (NFD) is deployed, enable QAT node labeling using NFD feature rules."

View File

@ -4,9 +4,9 @@ annotations:
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: intel-device-plugins-sgx
apiVersion: v2
appVersion: 0.26.0
appVersion: 0.26.1
description: A Helm chart for Intel SGX Device Plugin
icon: https://avatars.githubusercontent.com/u/17888862?s=200&v=4
name: intel-device-plugins-sgx
type: application
version: 0.26.0
version: 0.26.1

View File

@ -0,0 +1,6 @@
questions:
- variable: nodeFeatureRule
default: false
type: boolean
label: Enable Node Feature Discovery feature labels
description: "When Node Feature Discovery (NFD) is deployed, enable SGX node labeling using NFD feature rules."

View File

@ -7,7 +7,7 @@ annotations:
catalog.cattle.io/featured: "2"
catalog.cattle.io/release-name: cost-analyzer
apiVersion: v2
appVersion: 1.103.5
appVersion: 1.104.0
dependencies:
- condition: global.grafana.enabled
name: grafana
@ -25,4 +25,4 @@ description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to moni
cloud costs.
icon: https://partner-charts.rancher.io/assets/logos/kubecost.png
name: cost-analyzer
version: 1.103.5
version: 1.104.0

View File

@ -8,51 +8,52 @@ While Helm is the [recommended install path](http://kubecost.com/install), these
<a name="config-options"></a><br/>
The following table lists the commonly used configurable parameters of the Kubecost Helm chart and their default values.
Parameter | Description | Default
--------- | ----------- | -------
`global.prometheus.enabled` | If false, use an existing Prometheus install. [More info](http://docs.kubecost.com/custom-prom). | `true`
`prometheus.kube-state-metrics.disabled` | If false, deploy [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) for Kubernetes metrics | `false`
`prometheus.kube-state-metrics.resources` | Set kube-state-metrics resource requests and limits. | `{}`
`prometheus.server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim. | `true`
`prometheus.server.persistentVolume.size` | Prometheus server data Persistent Volume size. Default set to retain ~6000 samples per second for 15 days. | `32Gi`
`prometheus.server.retention` | Determines when to remove old data. | `15d`
`prometheus.server.resources` | Prometheus server resource requests and limits. | `{}`
`prometheus.nodeExporter.resources` | Node exporter resource requests and limits. | `{}`
`prometheus.nodeExporter.enabled` `prometheus.serviceAccounts.nodeExporter.create` | If false, do not crate NodeExporter daemonset. | `true`
`prometheus.alertmanager.persistentVolume.enabled` | If true, Alertmanager will create a Persistent Volume Claim. | `true`
`prometheus.pushgateway.persistentVolume.enabled` | If true, Prometheus Pushgateway will create a Persistent Volume Claim. | `true`
`persistentVolume.enabled` | If true, Kubecost will create a Persistent Volume Claim for product config data. | `true`
`persistentVolume.size` | Define PVC size for cost-analyzer | `32.0Gi`
`persistentVolume.dbSize` | Define PVC size for cost-analyzer's flat file database | `32.0Gi`
`ingress.enabled` | If true, Ingress will be created | `false`
`ingress.annotations` | Ingress annotations | `{}`
`ingress.className` | Ingress class name | `{}`
`ingress.paths` | Ingress paths | `["/"]`
`ingress.hosts` | Ingress hostnames | `[cost-analyzer.local]`
`ingress.tls` | Ingress TLS configuration (YAML) | `[]`
`networkPolicy.enabled` | If true, create a NetworkPolicy to deny egress | `false`
`networkPolicy.costAnalyzer.enabled` | If true, create a newtork policy for cost-analzyer | `false`
`networkPolicy.costAnalyzer.annotations` | Annotations to be added to the network policy | `{}`
`networkPolicy.costAnalyzer.additionalLabels` | Additional labels to be added to the network policy | `{}`
`networkPolicy.costAnalyzer.ingressRules` | A list of network policy ingress rules | `null`
`networkPolicy.costAnalyzer.egressRules` | A list of network policy egress rules | `null`
`networkCosts.enabled` | If true, collect network allocation metrics [More info](http://docs.kubecost.com/network-allocation) | `false`
`networkCosts.podMonitor.enabled` | If true, a [PodMonitor](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#podmonitor) for the network-cost daemonset is created | `false`
`serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false`
`serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}`
`prometheusRule.enabled` | Set this to `true` to create PrometheusRule for Prometheus operator | `false`
`prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}`
`grafana.resources` | Grafana resource requests and limits. | `{}`
`grafana.sidecar.datasources.defaultDatasourceEnabled` | Set this to `false` to disable creation of Prometheus datasource in Grafana | `true`
`serviceAccount.create` | Set this to `false` if you want to create the service account `kubecost-cost-analyzer` on your own | `true`
`tolerations` | node taints to tolerate | `[]`
`affinity` | pod affinity | `{}`
`kubecostProductConfigs.productKey.mountPath` | Use instead of `kubecostProductConfigs.productKey.secretname` to declare the path at which the product key file is mounted (eg. by a secrets provisioner) | `N/A`
`kubecostFrontend.api.fqdn` | Customize the upstream api FQDN | `computed in terms of the service name and namespace`
`kubecostFrontend.model.fqdn` | Customize the upstream model FQDN | `computed in terms of the service name and namespace`
`clusterController.fqdn` | Customize the upstream cluster controller FQDN | `computed in terms of the service name and namespace`
`global.grafana.fqdn` | Customize the upstream grafana FQDN | `computed in terms of the release name and namespace`
| Parameter | Description | Default |
|------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|
| `global.prometheus.enabled` | If false, use an existing Prometheus install. [More info](http://docs.kubecost.com/custom-prom). | `true` |
| `prometheus.kube-state-metrics.disabled` | If false, deploy [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) for Kubernetes metrics | `false` |
| `prometheus.kube-state-metrics.resources` | Set kube-state-metrics resource requests and limits. | `{}` |
| `prometheus.server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim. | `true` |
| `prometheus.server.persistentVolume.size` | Prometheus server data Persistent Volume size. Default set to retain ~6000 samples per second for 15 days. | `32Gi` |
| `prometheus.server.retention` | Determines when to remove old data. | `15d` |
| `prometheus.server.resources` | Prometheus server resource requests and limits. | `{}` |
| `prometheus.nodeExporter.resources` | Node exporter resource requests and limits. | `{}` |
| `prometheus.nodeExporter.enabled` `prometheus.serviceAccounts.nodeExporter.create` | If false, do not crate NodeExporter daemonset. | `true` |
| `prometheus.alertmanager.persistentVolume.enabled` | If true, Alertmanager will create a Persistent Volume Claim. | `true` |
| `prometheus.pushgateway.persistentVolume.enabled` | If true, Prometheus Pushgateway will create a Persistent Volume Claim. | `true` |
| `persistentVolume.enabled` | If true, Kubecost will create a Persistent Volume Claim for product config data. | `true` |
| `persistentVolume.size` | Define PVC size for cost-analyzer | `32.0Gi` |
| `persistentVolume.dbSize` | Define PVC size for cost-analyzer's flat file database | `32.0Gi` |
| `ingress.enabled` | If true, Ingress will be created | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.className` | Ingress class name | `{}` |
| `ingress.paths` | Ingress paths | `["/"]` |
| `ingress.hosts` | Ingress hostnames | `[cost-analyzer.local]` |
| `ingress.tls` | Ingress TLS configuration (YAML) | `[]` |
| `networkPolicy.enabled` | If true, create a NetworkPolicy to deny egress | `false` |
| `networkPolicy.costAnalyzer.enabled` | If true, create a newtork policy for cost-analzyer | `false` |
| `networkPolicy.costAnalyzer.annotations` | Annotations to be added to the network policy | `{}` |
| `networkPolicy.costAnalyzer.additionalLabels` | Additional labels to be added to the network policy | `{}` |
| `networkPolicy.costAnalyzer.ingressRules` | A list of network policy ingress rules | `null` |
| `networkPolicy.costAnalyzer.egressRules` | A list of network policy egress rules | `null` |
| `networkCosts.enabled` | If true, collect network allocation metrics [More info](http://docs.kubecost.com/network-allocation) | `false` |
| `networkCosts.podMonitor.enabled` | If true, a [PodMonitor](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#podmonitor) for the network-cost daemonset is created | `false` |
| `serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
| `serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
| `serviceMonitor.relabelings` | Sets Prometheus metric_relabel_configs on the scrape job | `[]` |
| `serviceMonitor.metricRelabelings` | Sets Prometheus relabel_configs on the scrape job | `[]` |
| `prometheusRule.enabled` | Set this to `true` to create PrometheusRule for Prometheus operator | `false` |
| `prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
| `grafana.resources` | Grafana resource requests and limits. | `{}` |
| `grafana.sidecar.datasources.defaultDatasourceEnabled` | Set this to `false` to disable creation of Prometheus datasource in Grafana | `true` |
| `serviceAccount.create` | Set this to `false` if you want to create the service account `kubecost-cost-analyzer` on your own | `true` |
| `tolerations` | node taints to tolerate | `[]` |
| `affinity` | pod affinity | `{}` |
| `kubecostProductConfigs.productKey.mountPath` | Use instead of `kubecostProductConfigs.productKey.secretname` to declare the path at which the product key file is mounted (eg. by a secrets provisioner) | `N/A` |
| `kubecostFrontend.api.fqdn` | Customize the upstream api FQDN | `computed in terms of the service name and namespace` |
| `kubecostFrontend.model.fqdn` | Customize the upstream model FQDN | `computed in terms of the service name and namespace` |
| `clusterController.fqdn` | Customize the upstream cluster controller FQDN | `computed in terms of the service name and namespace` |
| `global.grafana.fqdn` | Customize the upstream grafana FQDN | `computed in terms of the release name and namespace` |
## Testing
To perform local testing do next:

View File

@ -45,6 +45,9 @@ spec:
securityContext:
{{ toYaml .Values.securityContext | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- if .Values.dashboards }}
initContainers:
- name: download-dashboards
@ -73,8 +76,8 @@ spec:
containers:
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard
image: "{{ .Values.sidecar.image }}"
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
imagePullPolicy: {{ .Values.sidecar.image.pullPolicy }}
env:
- name: LABEL
value: "{{ .Values.sidecar.dashboards.label }}"
@ -90,8 +93,8 @@ spec:
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: {{ template "grafana.name" . }}-sc-datasources
image: "{{ .Values.sidecar.image }}"
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
imagePullPolicy: {{ .Values.sidecar.image.pullPolicy }}
env:
- name: LABEL
value: "{{ .Values.sidecar.datasources.label }}"

View File

@ -1,6 +1,6 @@
rbac:
create: true
pspEnabled: true
pspEnabled: false
pspUseAppArmor: true
serviceAccount:
create: true
@ -256,8 +256,10 @@ smtp:
## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
image: kiwigrid/k8s-sidecar:1.23.1
imagePullPolicy: IfNotPresent
image:
repository: kiwigrid/k8s-sidecar
tag: 1.23.1
pullPolicy: IfNotPresent
resources:
# limits:
# cpu: 100m
@ -275,3 +277,7 @@ sidecar:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource
## PriorityClassName
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

View File

@ -181,6 +181,7 @@ Parameter | Description | Default
`alertmanager.strategy` | Deployment strategy | `{ "type": "RollingUpdate" }`
`alertmanagerFiles.alertmanager.yml` | Prometheus alertmanager configuration | example configuration
`configmapReload.prometheus.enabled` | If false, the configmap-reload container for Prometheus will not be deployed | `true`
`configmapReload.prometheus.containerSecurityContext` | securityContext for container | `{}`
`configmapReload.prometheus.name` | configmap-reload container name | `configmap-reload`
`configmapReload.prometheus.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload`
`configmapReload.prometheus.image.tag` | configmap-reload container image tag | `v0.5.0`
@ -320,6 +321,7 @@ Parameter | Description | Default
`server.persistentVolume.storageClass` | Prometheus server data Persistent Volume Storage Class | `unset`
`server.persistentVolume.volumeBindingMode` | Prometheus server data Persistent Volume Binding Mode | `unset`
`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""`
`server.containerSecurityContext` | securityContext for container | `{}`
`server.emptyDir.sizeLimit` | emptyDir sizeLimit if a Persistent Volume is not used | `""`
`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}`
`server.podLabels` | labels to be added to Prometheus server pods | `{}`

View File

@ -60,6 +60,10 @@ spec:
{{- end }}
resources:
{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }}
{{- with .Values.configmapReload.prometheus.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
{{- if .Values.selfsignedCertConfigMapName }}
- name: {{ .Values.selfsignedCertConfigMapName }}
@ -123,6 +127,10 @@ spec:
successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }}
resources:
{{ toYaml .Values.server.resources | indent 12 }}
{{- with .Values.server.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: config-volume
mountPath: /etc/config

View File

@ -333,7 +333,6 @@ configmapReload:
##
extraVolumeDirs: []
## Additional configmap-reload mounts
##
extraConfigmapMounts: []
@ -343,11 +342,14 @@ configmapReload:
# configMap: prometheus-alerts
# readOnly: true
## configmap-reload resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
## configmap-reload container securityContext
containerSecurityContext: {}
alertmanager:
## If false, the configmap-reload container will not be deployed
##
@ -871,6 +873,8 @@ server:
runAsGroup: 1001
fsGroup: 1001
containerSecurityContext: {}
service:
annotations: {}
labels: {}

View File

@ -41,6 +41,9 @@ spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: thanos-bucket

View File

@ -45,6 +45,9 @@ spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: thanos-compact

View File

@ -47,6 +47,9 @@ spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: thanos-query

View File

@ -47,6 +47,9 @@ spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: thanos-query-frontend

View File

@ -45,6 +45,9 @@ spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
containers:
- name: thanos-store

View File

@ -3,6 +3,10 @@ image:
tag: v0.29.0
pullPolicy: IfNotPresent
## PriorityClassName
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
store:
enabled: true
# Maximum size of items held in the index cache.

View File

@ -21,12 +21,15 @@ spec:
labels:
app: awsstore
spec:
serviceAccountName: awsstore-serviceaccount
containers:
- image: {{ .Values.awsstore.imageNameAndVersion }}
name: awsstore
# Just sleep forever
command: [ "sleep" ]
args: [ "infinity" ]
serviceAccountName: awsstore-serviceaccount
{{- if .Values.awsstore.priorityClassName }}
priorityClassName: "{{ .Values.awsstore.priorityClassName }}"
{{- end }}
containers:
- image: {{ .Values.awsstore.imageNameAndVersion }}
name: awsstore
# Just sleep forever
command: [ "sleep" ]
args: [ "infinity" ]
{{- end }}
{{- end }}
{{- end }}

View File

@ -98,7 +98,7 @@ spec:
*/}}
{{- $etlBackupBucketSecret := "" }}
{{- if .Values.global.containerSecuritycontext }}
- name: tmp
- name: var-run
emptyDir: { }
- name: cache
emptyDir: { }
@ -509,7 +509,7 @@ spec:
{{- end }}
{{- if .Values.kubecostProductConfigs.cloudIntegrationSecret }}
- name: cloud-integration
mountPath: /var/cloud-integration
mountPath: /var/configs/cloud-integration
{{- end }}
{{- if or .Values.kubecostProductConfigs.serviceKeySecretName .Values.kubecostProductConfigs.createServiceKeySecret }}
- name: service-key-secret
@ -857,7 +857,7 @@ spec:
{{- end }}
{{- with .Values.kubecostModel.cloudCost }}
- name: CLOUD_COST_ENABLED
value: {{ (quote .enabled) | default (quote false) }}
value: {{ (quote .enabled) | default (quote true) }}
{{- with .labelList }}
- name: CLOUD_COST_IS_INCLUDE_LIST
value: {{ (quote .IsIncludeList) | default (quote false) }}
@ -1102,7 +1102,7 @@ spec:
- mountPath: /var/cache/nginx
name: cache
- mountPath: /var/run
name: tmp
name: var-run
{{- end }}
{{- if .Values.kubecostFrontend.tls }}
{{- if .Values.kubecostFrontend.tls.enabled }}

View File

@ -1,6 +1,10 @@
{{- if .Values.kubecostProductConfigs }}
{{- if .Values.kubecostProductConfigs.productKey }}
{{- if .Values.kubecostProductConfigs.productKey.enabled }}
# If the productKey.key is not specified, the configmap will not be created
{{- if .Values.kubecostProductConfigs.productKey.key }}
# If the secretname is specified, the configmap will not be created
{{- if not .Values.kubecostProductConfigs.productKey.secretname }}
apiVersion: v1
kind: ConfigMap
metadata:
@ -14,4 +18,6 @@ data:
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -22,6 +22,9 @@ spec:
labels:
app: adapter
spec:
{{- if .Values.remoteWrite.postgres.priorityClassName }}
priorityClassName: "{{ .Values.remoteWrite.postgres.priorityClassName }}"
{{- end }}
initContainers:
- name: kubecost-sql-init
image: {{ .Values.remoteWrite.postgres.initImage }}:prod-{{ $.Chart.AppVersion }}

View File

@ -18,6 +18,12 @@ spec:
scrapeTimeout: 10s
path: /metrics
scheme: http
{{- with .Values.serviceMonitor.metricRelabelings }}
metricRelabelings: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings: {{ toYaml . | nindent 8 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}

View File

@ -15,7 +15,11 @@ spec:
provider: {{ required "Specify a valid provider." .Values.agentCsi.secretProvider.provider }}
{{- if .Values.agentCsi.secretProvider.parameters }}
parameters:
{{- .Values.agentCsi.secretProvider.parameters | toYaml | nindent 4 }}
{{- .Values.agentCsi.secretProvider.parameters | toYaml | nindent 4 }}
{{- end }}
{{- if .Values.agentCsi.secretProvider.secretObjects }}
secretObjects:
{{- .Values.agentCsi.secretProvider.secretObjects | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -35,7 +35,7 @@ rules:
- update
- delete
- apiGroups:
- ""
- ''
- events.k8s.io
resources:
- events
@ -61,6 +61,16 @@ rules:
- patch
- update
- delete
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ''
resources:
@ -75,6 +85,16 @@ rules:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- 'cluster-controller-nsturndown-config'
verbs:
- get
- create
- update
- apiGroups:
- extensions
resources:
@ -165,6 +185,22 @@ subjects:
name: {{ template "kubecost.clusterControllerName" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-controller-continuous-cluster-sizing
namespace: {{ .Release.Namespace }}
labels:
{{- include "cost-analyzer.commonLabels" . | nindent 4 }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-controller-nsturndown-config
namespace: {{ .Release.Namespace }}
labels:
{{- include "cost-analyzer.commonLabels" . | nindent 4 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
@ -186,6 +222,9 @@ spec:
labels:
app: {{ template "kubecost.clusterControllerName" . }}
spec:
{{- if .Values.clusterController.priorityClassName }}
priorityClassName: "{{ .Values.clusterController.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "kubecost.clusterControllerName" . }}
image: {{ .Values.clusterController.image }}
@ -218,6 +257,8 @@ spec:
value: {{ .Values.clusterController.logLevel | default "info" }}
- name: CC_KUBESCALER_COST_MODEL_PATH
value: http://{{ $serviceName }}.{{ .Release.Namespace }}:{{ .Values.service.targetPort | default 9090 }}/model
- name: CC_CCL_COST_MODEL_PATH
value: http://{{ $serviceName }}.{{ .Release.Namespace }}:{{ .Values.service.targetPort | default 9090 }}/model
{{- if .Values.clusterController.kubescaler }}
- name: CC_KUBESCALER_DEFAULT_RESIZE_ALL
value: {{ .Values.clusterController.kubescaler.defaultResizeAll | default "false" | quote }}

View File

@ -192,7 +192,7 @@ spec:
{{- end }}
{{- if .Values.kubecostProductConfigs.cloudIntegrationSecret }}
- name: cloud-integration
mountPath: /var/cloud-integration
mountPath: /var/configs/cloud-integration
{{- end }}
{{- if or .Values.kubecostProductConfigs.serviceKeySecretName .Values.kubecostProductConfigs.createServiceKeySecret }}
- name: service-key-secret

View File

@ -21,6 +21,12 @@ spec:
scrapeTimeout: 10s
path: /metrics
scheme: http
{{- with .Values.kubecostMetrics.exporter.serviceMonitor.metricRelabelings }}
metricRelabelings: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.kubecostMetrics.exporter.serviceMonitor.relabelings }}
relabelings: {{ toYaml . | nindent 8 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}

View File

@ -17,6 +17,12 @@ spec:
scrapeTimeout: {{ .Values.serviceMonitor.networkCosts.scrapeTimeout }}
path: /metrics
scheme: http
{{- with .Values.serviceMonitor.networkCosts.metricRelabelings }}
metricRelabelings: {{ toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.networkCosts.relabelings }}
relabelings: {{ toYaml . | nindent 8 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}

View File

@ -4,7 +4,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "query-service.fullname" . }}
namespace: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "query-service.commonLabels" . | nindent 4 }}
spec:

View File

@ -10,7 +10,7 @@ metadata:
spec:
selector:
{{ include "query-service.selectorLabels" . | nindent 4 }}
type: "LoadBalancer"
type: "ClusterIP"
ports:
- name: tcp-query-service
port: 9003

Some files were not shown because too many files have changed in this diff Show More