Charts CI

```
Updated:
  argo/argo-cd:
    - 5.34.6
  bitnami/postgresql:
    - 12.5.6
  bitnami/spark:
    - 7.0.0
  bitnami/wordpress:
    - 16.1.9
  cert-manager/cert-manager:
    - v1.12.1
  cockroach-labs/cockroachdb:
    - 11.0.1
  codefresh/cf-runtime:
    - 1.0.7
  datadog/datadog:
    - 3.30.9
  external-secrets/external-secrets:
    - 0.8.3
  jfrog/artifactory-ha:
    - 107.59.9
  jfrog/artifactory-jcr:
    - 107.59.9
  kubecost/cost-analyzer:
    - 1.103.4
  new-relic/nri-bundle:
    - 5.0.16
  openebs/openebs:
    - 3.7.0
  redpanda/redpanda:
    - 4.0.26
  speedscale/speedscale-operator:
    - 1.3.8
  sysdig/sysdig:
    - 1.15.90
```
pull/768/head
github-actions[bot] 2023-05-30 11:33:49 +00:00
parent 61d688b08e
commit 6ddf4d4807
152 changed files with 1556 additions and 439 deletions

View File

@ -1,7 +1,7 @@
annotations:
artifacthub.io/changes: |
- kind: fixed
description: Allow to disable containerSecurityContext
- kind: added
description: Option to set appProtocol for Argocd server https service port
artifacthub.io/signKey: |
fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252
url: https://argoproj.github.io/argo-helm/pgp_keys.asc
@ -10,7 +10,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.22.0-0'
catalog.cattle.io/release-name: argo-cd
apiVersion: v2
appVersion: v2.7.2
appVersion: v2.7.3
dependencies:
- condition: redis-ha.enabled
name: redis-ha
@ -32,4 +32,4 @@ name: argo-cd
sources:
- https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
- https://github.com/argoproj/argo-cd
version: 5.34.4
version: 5.34.6

View File

@ -31,6 +31,9 @@ spec:
{{- if eq .Values.server.service.type "NodePort" }}
nodePort: {{ .Values.server.service.nodePortHttps }}
{{- end }}
{{- with .Values.server.service.servicePortHttpsAppProtocol }}
appProtocol: {{ . }}
{{- end }}
selector:
{{- include "argo-cd.selectorLabels" (dict "context" . "name" .Values.server.name) | nindent 4 }}
{{- if eq .Values.server.service.type "LoadBalancer" }}

View File

@ -1768,6 +1768,8 @@ server:
servicePortHttpName: http
# -- Server service https port name, can be used to route traffic via istio
servicePortHttpsName: https
# -- Server service https port appProtocol. (should be upper case - i.e. HTTPS)
# servicePortHttpsAppProtocol: HTTPS
# -- LoadBalancer will get created with the IP specified in this field
loadBalancerIP: ""
# -- Source IP ranges to allow access to service from
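The new `server.service.servicePortHttpsAppProtocol` value introduced above lets the Argo CD server's https service port advertise an `appProtocol`, e.g. for Istio protocol selection. A minimal values override, assuming chart version 5.34.6:

```
server:
  service:
    # Sets appProtocol on the https port; the chart comment notes it
    # should be upper case, i.e. HTTPS.
    servicePortHttpsAppProtocol: HTTPS
```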

View File

@ -31,4 +31,4 @@ maintainers:
name: postgresql
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/postgresql
version: 12.5.5
version: 12.5.6

View File

@ -98,7 +98,7 @@ kubectl delete pvc -l release=my-release
| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `image.registry` | PostgreSQL image registry | `docker.io` |
| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.3.0-debian-11-r4` |
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.3.0-debian-11-r7` |
| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify image pull secrets | `[]` |
@ -375,7 +375,7 @@ kubectl delete pvc -l release=my-release
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r118` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r120` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
@ -403,7 +403,7 @@ kubectl delete pvc -l release=my-release
| `metrics.enabled` | Start a prometheus exporter | `false` |
| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` |
| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` |
| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.12.0-debian-11-r89` |
| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.12.0-debian-11-r91` |
| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` |

View File

@ -450,7 +450,7 @@ spec:
{{- end }}
{{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }}
- name: postgresql-extended-config
mountPath: /bitnami/postgresql/conf/conf.d/
mountPath: {{ .Values.primary.persistence.mountPath }}/conf/conf.d/
{{- end }}
{{- if .Values.auth.usePasswordFiles }}
- name: postgresql-password
@ -474,7 +474,7 @@ spec:
{{- end }}
{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }}
- name: postgresql-config
mountPath: /bitnami/postgresql/conf
mountPath: {{ .Values.primary.persistence.mountPath }}/conf
{{- end }}
{{- if .Values.primary.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }}
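With 12.5.6, the primary's configuration mounts are derived from `primary.persistence.mountPath` instead of the hard-coded `/bitnami/postgresql` prefix. A sketch of a values override exercising this (the custom path is illustrative):

```
primary:
  persistence:
    # conf/ and conf.d/ mounts in the primary statefulset now follow this
    # path, e.g. /data/postgresql/conf and /data/postgresql/conf/conf.d/
    mountPath: /data/postgresql
```

The read-replica statefulset in the next hunk applies the same substitution via `readReplicas.persistence.mountPath`.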

View File

@ -369,7 +369,7 @@ spec:
{{- end }}
{{- if .Values.readReplicas.extendedConfiguration }}
- name: postgresql-extended-config
mountPath: /bitnami/postgresql/conf/conf.d/
mountPath: {{ .Values.readReplicas.persistence.mountPath }}/conf/conf.d/
{{- end }}
{{- if .Values.tls.enabled }}
- name: postgresql-certificates

View File

@ -95,7 +95,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/postgresql
tag: 15.3.0-debian-11-r4
tag: 15.3.0-debian-11-r7
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1136,7 +1136,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r118
tag: 11-debian-11-r120
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -1231,7 +1231,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.12.0-debian-11-r89
tag: 0.12.0-debian-11-r91
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -6,7 +6,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 3.3.2
appVersion: 3.4.0
dependencies:
- name: common
repository: file://./charts/common
@ -27,4 +27,4 @@ maintainers:
name: spark
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/spark
version: 6.6.3
version: 7.0.0

View File

@ -80,16 +80,16 @@ The command removes all the Kubernetes components associated with the chart and
### Spark parameters
| Name | Description | Value |
| ------------------- | ----------------------------------------------------------------------------------------------------- | --------------------- |
| `image.registry` | Spark image registry | `docker.io` |
| `image.repository` | Spark image repository | `bitnami/spark` |
| `image.tag` | Spark image tag (immutable tags are recommended) | `3.3.2-debian-11-r30` |
| `image.digest` | Spark image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Spark image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Enable image debug mode | `false` |
| `hostNetwork` | Enable HOST Network | `false` |
| Name | Description | Value |
| ------------------- | ----------------------------------------------------------------------------------------------------- | -------------------- |
| `image.registry` | Spark image registry | `docker.io` |
| `image.repository` | Spark image repository | `bitnami/spark` |
| `image.tag` | Spark image tag (immutable tags are recommended) | `3.4.0-debian-11-r0` |
| `image.digest` | Spark image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Spark image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Enable image debug mode | `false` |
| `hostNetwork` | Enable HOST Network | `false` |
### Spark master parameters

View File

@ -92,7 +92,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/spark
tag: 3.3.2-debian-11-r30
tag: 3.4.0-debian-11-r0
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

View File

@ -40,4 +40,4 @@ maintainers:
name: wordpress
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/wordpress
version: 16.1.8
version: 16.1.9

View File

@ -80,7 +80,7 @@ The command removes all the Kubernetes components associated with the chart and
| ------------------- | --------------------------------------------------------------------------------------------------------- | -------------------- |
| `image.registry` | WordPress image registry | `docker.io` |
| `image.repository` | WordPress image repository | `bitnami/wordpress` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.2.2-debian-11-r2` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.2.2-debian-11-r3` |
| `image.digest` | WordPress image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` |
| `image.pullSecrets` | WordPress image pull secrets | `[]` |
@ -247,7 +247,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r118` |
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r119` |
| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
@ -279,7 +279,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` |
| `metrics.image.registry` | Apache exporter image registry | `docker.io` |
| `metrics.image.repository` | Apache exporter image repository | `bitnami/apache-exporter` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.13.3-debian-11-r9` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.13.3-debian-11-r10` |
| `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` |

View File

@ -73,7 +73,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/wordpress
tag: 6.2.2-debian-11-r2
tag: 6.2.2-debian-11-r3
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -763,7 +763,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r118
tag: 11-debian-11-r119
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -857,7 +857,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.13.3-debian-11-r9
tag: 0.13.3-debian-11-r10
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -9,7 +9,7 @@ annotations:
catalog.cattle.io/namespace: cert-manager
catalog.cattle.io/release-name: cert-manager
apiVersion: v1
appVersion: v1.12.0
appVersion: v1.12.1
description: A Helm chart for cert-manager
home: https://github.com/cert-manager/cert-manager
icon: https://raw.githubusercontent.com/cert-manager/cert-manager/d53c0b9270f8cd90d908460d69502694e1838f5f/logo/logo-small.png
@ -26,4 +26,4 @@ maintainers:
name: cert-manager
sources:
- https://github.com/cert-manager/cert-manager
version: v1.12.0
version: v1.12.1

View File

@ -19,7 +19,7 @@ Before installing the chart, you must first install the cert-manager CustomResou
This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources.
```bash
$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.0/cert-manager.crds.yaml
$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.1/cert-manager.crds.yaml
```
To install the chart with the release name `my-release`:
@ -29,7 +29,7 @@ To install the chart with the release name `my-release`:
$ helm repo add jetstack https://charts.jetstack.io
## Install the cert-manager helm chart
$ helm install my-release --namespace cert-manager --version v1.12.0 jetstack/cert-manager
$ helm install my-release --namespace cert-manager --version v1.12.1 jetstack/cert-manager
```
In order to begin issuing certificates, you will need to set up a ClusterIssuer
@ -65,7 +65,7 @@ If you want to completely uninstall cert-manager from your cluster, you will als
delete the previously installed CustomResourceDefinition resources:
```console
$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.0/cert-manager.crds.yaml
$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.1/cert-manager.crds.yaml
```
## Configuration
@ -86,7 +86,7 @@ The following table lists the configurable parameters of the cert-manager chart
| `global.leaderElection.retryPeriod` | The duration the clients should wait between attempting acquisition and renewal of a leadership | |
| `installCRDs` | If true, CRD resources will be installed as part of the Helm chart. If enabled, uninstalling the chart will delete the CRD resources, causing all installed custom resources to be DELETED | `false` |
| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` |
| `image.tag` | Image tag | `v1.12.0` |
| `image.tag` | Image tag | `v1.12.1` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `replicaCount` | Number of cert-manager replicas | `1` |
| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod |
@ -169,7 +169,7 @@ The following table lists the configurable parameters of the cert-manager chart
| `webhook.tolerations` | Node tolerations for webhook pod assignment | `[]` |
| `webhook.topologySpreadConstraints` | Topology spread constraints for webhook pod assignment | `[]` |
| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` |
| `webhook.image.tag` | Webhook image tag | `v1.12.0` |
| `webhook.image.tag` | Webhook image tag | `v1.12.1` |
| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` |
| `webhook.securePort` | The port that the webhook should listen on for requests. | `10250` |
| `webhook.securityContext` | Security context for webhook pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
@ -207,12 +207,12 @@ The following table lists the configurable parameters of the cert-manager chart
| `cainjector.tolerations` | Node tolerations for cainjector pod assignment | `[]` |
| `cainjector.topologySpreadConstraints` | Topology spread constraints for cainjector pod assignment | `[]` |
| `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` |
| `cainjector.image.tag` | cainjector image tag | `v1.12.0` |
| `cainjector.image.tag` | cainjector image tag | `v1.12.1` |
| `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` |
| `cainjector.securityContext` | Security context for cainjector pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
| `cainjector.containerSecurityContext` | Security context to be set on cainjector component container | refer to [Default Security Contexts](#default-security-contexts) |
| `acmesolver.image.repository` | acmesolver image repository | `quay.io/jetstack/cert-manager-acmesolver` |
| `acmesolver.image.tag` | acmesolver image tag | `v1.12.0` |
| `acmesolver.image.tag` | acmesolver image tag | `v1.12.1` |
| `acmesolver.image.pullPolicy` | acmesolver image pull policy | `IfNotPresent` |
| `startupapicheck.enabled` | Toggles whether the startupapicheck Job should be installed | `true` |
| `startupapicheck.securityContext` | Security context for startupapicheck pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
@ -228,7 +228,7 @@ The following table lists the configurable parameters of the cert-manager chart
| `startupapicheck.tolerations` | Node tolerations for startupapicheck pod assignment | `[]` |
| `startupapicheck.podLabels` | Optional additional labels to add to the startupapicheck Pods | `{}` |
| `startupapicheck.image.repository` | startupapicheck image repository | `quay.io/jetstack/cert-manager-ctl` |
| `startupapicheck.image.tag` | startupapicheck image tag | `v1.12.0` |
| `startupapicheck.image.tag` | startupapicheck image tag | `v1.12.1` |
| `startupapicheck.image.pullPolicy` | startupapicheck image pull policy | `IfNotPresent` |
| `startupapicheck.serviceAccount.create` | If `true`, create a new service account for the startupapicheck component | `true` |
| `startupapicheck.serviceAccount.name` | Service account for the startupapicheck component to be used. If not set and `startupapicheck.serviceAccount.create` is `true`, a name is generated using the fullname template | |

View File

@ -69,8 +69,11 @@ podDisruptionBudget:
# minAvailable and maxUnavailable can either be set to an integer (e.g. 1)
# or a percentage value (e.g. 25%)
# Comma separated list of feature gates that should be enabled on the
# controller pod & webhook pod.
# Comma separated list of feature gates that should be enabled on the controller
# Note: do not use this field to pass feature gate values into webhook
# component as this behaviour relies on a bug that will be fixed in cert-manager 1.13
# https://github.com/cert-manager/cert-manager/pull/6093
# Use webhook.extraArgs to pass --feature-gates flag directly instead.
featureGates: ""
# The maximum number of challenges that can be scheduled as 'processing' at once
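Per the revised comment, `featureGates` now targets only the controller; webhook feature gates should be passed through `webhook.extraArgs` until cert-manager 1.13. A minimal sketch of that guidance (the gate name is illustrative):

```
# Controller-side feature gates:
featureGates: "ExperimentalGatewayAPISupport=true"
# Webhook-side feature gates, passed directly as a flag:
webhook:
  extraArgs:
    - --feature-gates=ExperimentalGatewayAPISupport=true
```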

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.8-0'
catalog.cattle.io/release-name: cockroachdb
apiVersion: v1
appVersion: 23.1.1
appVersion: 23.1.2
description: CockroachDB is a scalable, survivable, strongly-consistent SQL database.
home: https://www.cockroachlabs.com
icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png
@ -14,4 +14,4 @@ maintainers:
name: cockroachdb
sources:
- https://github.com/cockroachdb/cockroach
version: 11.0.0
version: 11.0.1

View File

@ -229,10 +229,10 @@ kubectl get pods \
```
```
my-release-cockroachdb-0 cockroachdb/cockroach:v23.1.1
my-release-cockroachdb-1 cockroachdb/cockroach:v23.1.1
my-release-cockroachdb-2 cockroachdb/cockroach:v23.1.1
my-release-cockroachdb-3 cockroachdb/cockroach:v23.1.1
my-release-cockroachdb-0 cockroachdb/cockroach:v23.1.2
my-release-cockroachdb-1 cockroachdb/cockroach:v23.1.2
my-release-cockroachdb-2 cockroachdb/cockroach:v23.1.2
my-release-cockroachdb-3 cockroachdb/cockroach:v23.1.2
```
Resume normal operations. Once you are comfortable that the stability and performance of the cluster is what you'd expect post-upgrade, finalize the upgrade:
@ -287,7 +287,7 @@ Verify that no pod is deleted and then upgrade as normal. A new StatefulSet will
For more information about upgrading a cluster to the latest major release of CockroachDB, see [Upgrade to CockroachDB v21.1](https://www.cockroachlabs.com/docs/stable/upgrade-cockroach-version.html).
Note that there are some backward-incompatible changes to SQL features between versions 20.2 and 21.1. For details, see the [CockroachDB v23.1.1 release notes](https://www.cockroachlabs.com/docs/releases/v23.1.1.html#backward-incompatible-changes).
Note that there are some backward-incompatible changes to SQL features between versions 20.2 and 21.1. For details, see the [CockroachDB v23.1.2 release notes](https://www.cockroachlabs.com/docs/releases/v23.1.2.html#backward-incompatible-changes).
## Configuration
@ -316,7 +316,7 @@ For details see the [`values.yaml`](values.yaml) file.
| `conf.store.size` | CockroachDB storage size | `""` |
| `conf.store.attrs` | CockroachDB storage attributes | `""` |
| `image.repository` | Container image name | `cockroachdb/cockroach` |
| `image.tag` | Container image tag | `v23.1.1` |
| `image.tag` | Container image tag | `v23.1.2` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` |
| `statefulset.replicas` | StatefulSet replicas number | `3` |

View File

@ -1,7 +1,7 @@
# Generated file, DO NOT EDIT. Source: build/templates/values.yaml
image:
repository: cockroachdb/cockroach
tag: v23.1.1
tag: v23.1.2
pullPolicy: IfNotPresent
credentials: {}
# registry: docker.io

View File

@ -15,4 +15,4 @@ maintainers:
- name: codefresh
url: https://codefresh-io.github.io/
name: cf-runtime
version: 1.0.6
version: 1.0.7

View File

@ -1,6 +1,6 @@
## Codefresh Runner
![Version: 1.0.6](https://img.shields.io/badge/Version-1.0.6-informational?style=flat-square)
![Version: 1.0.7](https://img.shields.io/badge/Version-1.0.7-informational?style=flat-square)
## Prerequisites
@ -92,7 +92,7 @@ Kubernetes: `>=1.19.0-0`
| re.dindDaemon.tlsverify | bool | `true` | |
| re.serviceAccount | object | `{"annotations":{}}` | Set annotation on engine Service Account Ref: https://codefresh.io/docs/docs/administration/codefresh-runner/#injecting-aws-arn-roles-into-the-cluster |
| runner.env | object | `{}` | Add additional env vars |
| runner.image | string | `"codefresh/venona:1.9.15"` | Set runner image |
| runner.image | string | `"codefresh/venona:1.9.16"` | Set runner image |
| runner.nodeSelector | object | `{}` | Set runner node selector |
| runner.resources | object | `{}` | Set runner requests and limits |
| runner.tolerations | list | `[]` | Set runner tolerations |
@ -111,7 +111,7 @@ Kubernetes: `>=1.19.0-0`
| storage.gcedisk.volumeType | string | `"pd-ssd"` | Set GCP volume backend type (`pd-ssd`/`pd-standard`) |
| storage.local.volumeParentDir | string | `"/var/lib/codefresh/dind-volumes"` | Set volume path on the host filesystem |
| storage.localVolumeMonitor.env | object | `{}` | |
| storage.localVolumeMonitor.image | string | `"codefresh/dind-volume-utils:1.29.3"` | Set `dind-lv-monitor` image |
| storage.localVolumeMonitor.image | string | `"codefresh/dind-volume-utils:1.29.4"` | Set `dind-lv-monitor` image |
| storage.localVolumeMonitor.initContainer.image | string | `"alpine"` | |
| storage.localVolumeMonitor.nodeSelector | object | `{}` | |
| storage.localVolumeMonitor.resources | object | `{}` | |

View File

@ -27,7 +27,7 @@ dockerRegistry: "quay.io"
# @default -- See below
runner:
# -- Set runner image
image: "codefresh/venona:1.9.15"
image: "codefresh/venona:1.9.16"
# -- Add additional env vars
env: {}
# E.g.
@ -119,7 +119,7 @@ storage:
# @default -- See below
localVolumeMonitor:
# -- Set `dind-lv-monitor` image
image: codefresh/dind-volume-utils:1.29.3
image: codefresh/dind-volume-utils:1.29.4
initContainer:
image: alpine
nodeSelector: {}

View File

@ -1,5 +1,13 @@
# Datadog changelog
## 3.30.9
* Pass the Cluster Agent its own pod name. Cluster Agent 7.46+ uses it to make leader election work when using the host network.
## 3.30.8
* Update `fips.image.tag` to `0.5.2` version
## 3.30.7
* Fix Windows support of `agents.customAgentConfig` to avoid bind mount of a file.

View File

@ -19,4 +19,4 @@ name: datadog
sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-agent
version: 3.30.7
version: 3.30.9

View File

@ -1,6 +1,6 @@
# Datadog
![Version: 3.30.7](https://img.shields.io/badge/Version-3.30.7-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
![Version: 3.30.9](https://img.shields.io/badge/Version-3.30.9-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
[Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/).
@ -739,7 +739,7 @@ helm install <RELEASE_NAME> \
| fips.image.name | string | `"fips-proxy"` | |
| fips.image.pullPolicy | string | `"IfNotPresent"` | Datadog the FIPS sidecar image pull policy |
| fips.image.repository | string | `nil` | |
| fips.image.tag | string | `"0.5.0"` | |
| fips.image.tag | string | `"0.5.2"` | Define the FIPS sidecar container version to use. |
| fips.local_address | string | `"127.0.0.1"` | |
| fips.port | int | `9803` | |
| fips.portRange | int | `15` | |

View File

@ -150,6 +150,10 @@ spec:
{{- end }}
{{- end }}
env:
- name: DD_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: DD_HEALTH_PORT
{{- $healthPort := .Values.clusterAgent.healthPort }}
value: {{ $healthPort | quote }}

View File

@ -1173,8 +1173,8 @@ fips:
## fips.image.name -- Define the FIPS sidecar container image name.
name: fips-proxy
# agents.image.tag -- Define the FIPS sidecar container version to use.
tag: 0.5.0
# fips.image.tag -- Define the FIPS sidecar container version to use.
tag: 0.5.2
# fips.image.pullPolicy -- Datadog the FIPS sidecar image pull policy
pullPolicy: IfNotPresent

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>= 1.19.0-0'
catalog.cattle.io/release-name: external-secrets
apiVersion: v2
appVersion: v0.8.2
appVersion: v0.8.3
description: External secret management for Kubernetes
home: https://github.com/external-secrets/external-secrets
icon: https://raw.githubusercontent.com/external-secrets/external-secrets/main/assets/eso-logo-large.png
@ -17,4 +17,4 @@ maintainers:
name: mcavoyk
name: external-secrets
type: application
version: 0.8.2
version: 0.8.3

View File

@ -4,7 +4,7 @@
[//]: # (README.md generated by gotmpl. DO NOT EDIT.)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.8.2](https://img.shields.io/badge/Version-0.8.2-informational?style=flat-square)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square)
External secret management for Kubernetes

View File

@ -7,8 +7,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets-cert-controller
app.kubernetes.io/version: v0.8.2
helm.sh/chart: external-secrets-0.8.2
app.kubernetes.io/version: v0.8.3
helm.sh/chart: external-secrets-0.8.3
name: RELEASE-NAME-external-secrets-cert-controller
namespace: NAMESPACE
spec:
@ -33,7 +33,7 @@ should match snapshot of default values:
- --service-namespace=NAMESPACE
- --secret-name=RELEASE-NAME-external-secrets-webhook
- --secret-namespace=NAMESPACE
image: ghcr.io/external-secrets/external-secrets:v0.8.2
image: ghcr.io/external-secrets/external-secrets:v0.8.3
imagePullPolicy: IfNotPresent
name: cert-controller
ports:

View File

@ -7,8 +7,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets
app.kubernetes.io/version: v0.8.2
helm.sh/chart: external-secrets-0.8.2
app.kubernetes.io/version: v0.8.3
helm.sh/chart: external-secrets-0.8.3
name: RELEASE-NAME-external-secrets
namespace: NAMESPACE
spec:
@ -28,7 +28,7 @@ should match snapshot of default values:
containers:
- args:
- --concurrent=1
image: ghcr.io/external-secrets/external-secrets:v0.8.2
image: ghcr.io/external-secrets/external-secrets:v0.8.3
imagePullPolicy: IfNotPresent
name: external-secrets
ports:

View File

@ -7,8 +7,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets-webhook
app.kubernetes.io/version: v0.8.2
helm.sh/chart: external-secrets-0.8.2
app.kubernetes.io/version: v0.8.3
helm.sh/chart: external-secrets-0.8.3
name: RELEASE-NAME-external-secrets-webhook
namespace: NAMESPACE
spec:
@ -34,7 +34,7 @@ should match snapshot of default values:
- --check-interval=5m
- --metrics-addr=:8080
- --healthz-addr=:8081
image: ghcr.io/external-secrets/external-secrets:v0.8.2
image: ghcr.io/external-secrets/external-secrets:v0.8.3
imagePullPolicy: IfNotPresent
name: webhook
ports:

View File

@ -1,6 +1,29 @@
# JFrog Artifactory-ha Chart Changelog
All changes to this chart will be documented in this file
## [107.59.9] - May 8, 2023
* Fixed reference of `terminationGracePeriodSeconds`
* **Breaking change**
* Updated the replicaCount defaults (Values.artifactory.primary.replicaCount and Values.artifactory.node.replicaCount) to support Cloud-Native High Availability. Refer to [Cloud-Native High Availability](https://jfrog.com/help/r/jfrog-installation-setup-documentation/cloud-native-high-availability)
* Updated the recommended resource presets (values-small, values-medium and values-large) in line with the Cloud-Native HA support.
* **IMPORTANT**
* If your deployment does not set primary.replicaCount and node.replicaCount explicitly, it is recommended to pin the current values to prevent undesired changes to the deployment structure (see the sketch after this changelog).
* Please be advised that resource allocation (requests, limits, javaOpts, affinity rules, etc.) will now be applied solely under Values.artifactory.primary when using the new defaults.
* **Upgrade**
* Upgrading from primary-members to primary-only is recommended, and can be done by deploying the chart with the new values.
* During the upgrade, member pods are deleted and new primary pods are created. This might trigger the creation of new PVCs.
* Added support for Cold Artifact Storage as part of the systemYaml configuration (disabled by default)
* Added new binary provider `s3-storage-v3-archive`
* Fixed disabling of jfconnect as a micro-service on non-split-container setups
## [107.58.0] - Mar 23, 2023
* Updated postgresql multi-arch tag version to `13.10.0-debian-11-r14`
* Removed obsolete remove-lost-found initContainer
* Added env JF_SHARED_NODE_HAENABLED under frontend when running in the container split mode
## [107.57.0] - Mar 02, 2023
* Updated initContainerImage and logger image to `ubi9/ubi-minimal:9.1.0.1793`
## [107.55.0] - Feb 21, 2023
* Updated initContainerImage and logger image to `ubi9/ubi-minimal:9.1.0.1760`
* Adding a custom preStop to Artifactory router for allowing graceful termination to complete
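Given the breaking change above, existing installations that want to keep the old primary-plus-members topology can pin it explicitly; a sketch matching the pre-107.59.x defaults shown further down in this diff (primary: 1, node: 2):

```
artifactory:
  primary:
    replicaCount: 1   # new chart default is 3 (primary-only HA)
  node:
    replicaCount: 2   # new chart default is 0
```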

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>= 1.14.0-0'
catalog.cattle.io/release-name: artifactory-ha
apiVersion: v2
appVersion: 7.55.14
appVersion: 7.59.9
dependencies:
- condition: postgresql.enabled
name: postgresql
@ -26,4 +26,4 @@ name: artifactory-ha
sources:
- https://github.com/jfrog/charts
type: application
version: 107.55.14
version: 107.59.9

View File

@ -5,10 +5,6 @@ artifactory:
masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
persistence:
enabled: false
# Remove extraEnvironmentVariables 7.45.x onwards
extraEnvironmentVariables:
- name: JF_JFCONNECT_ENABLED
value: "false"
replicator:
enabled: true
ingress:

View File

@ -5,10 +5,6 @@ artifactory:
masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
persistence:
enabled: false
# Remove extraEnvironmentVariables 7.45.x onwards
extraEnvironmentVariables:
- name: JF_JFCONNECT_ENABLED
value: "false"
replicator:
enabled: true
ingress:

View File

@ -124,7 +124,7 @@
</provider>
</config>
{{- end }}
{{- if or (eq .Values.artifactory.persistence.type "aws-s3-v3") (eq .Values.artifactory.persistence.type "s3-storage-v3-direct") }}
{{- if or (eq .Values.artifactory.persistence.type "aws-s3-v3") (eq .Values.artifactory.persistence.type "s3-storage-v3-direct") (eq .Values.artifactory.persistence.type "s3-storage-v3-archive") }}
<!-- AWS S3 V3 -->
<config version="2">
{{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
@ -146,6 +146,13 @@
<provider id="s3-storage-v3" type="s3-storage-v3"/>
</provider>
</chain>
{{- else if eq .Values.artifactory.persistence.type "s3-storage-v3-archive" }}
<!-- s3-storage-v3 default chain based on AWS S3 client for archive purposes -->
<chain> <!--template="s3-storage-v3-archive"-->
<provider id="s3-storage-v3-archive" type="s3-storage-v3-archive">
<provider id="s3-storage-v3" type="s3-storage-v3"/>
</provider>
</chain>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
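The new `s3-storage-v3-archive` chain is selected through the existing persistence `type` switch shown above. A hedged values sketch (bucket details are placeholders, and `awsS3V3` is assumed to be the chart's existing S3 settings block):

```
artifactory:
  persistence:
    type: s3-storage-v3-archive
    awsS3V3:
      region: us-east-1              # placeholder
      bucketName: artifactory-cold   # placeholder
```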

View File

@ -122,4 +122,14 @@ jconsole {{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.p
{{- if .Values.artifactory.node.javaOpts.jmx.enabled }}
jconsole {{ template "artifactory-ha.fullname" . }}:{{ .Values.artifactory.node.javaOpts.jmx.port }}
{{- end }}
{{- end }}
{{- if ge (.Values.artifactory.node.replicaCount | int) 1 }}
***************************************** WARNING *****************************************************************************
* Member node(s) are currently enabled; they will be deprecated in upcoming releases.                                          *
* It is recommended to upgrade from primary-members to primary-only.                                                           *
* This can be done by deploying the chart (>=107.59.x) with the new values; please also refer to the 107.59.x chart changelog. *
* More info: https://jfrog.com/help/r/jfrog-installation-setup-documentation/cloud-native-high-availability                    *
*******************************************************************************************************************************
{{- end }}

View File

@ -360,6 +360,9 @@ Resolve requiredServiceTypes value
*/}}
{{- define "artifactory-ha.router.requiredServiceTypes" -}}
{{- $requiredTypes := "jfrt,jfac" -}}
{{- if not .Values.access.enabled -}}
{{- $requiredTypes = "jfrt" -}}
{{- end -}}
{{- if .Values.observability.enabled -}}
{{- $requiredTypes = printf "%s,%s" $requiredTypes "jfob" -}}
{{- end -}}

View File

@ -175,8 +175,6 @@ spec:
{{- else }}
cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml;
{{- end }}
echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists";
rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found;
echo "Copy binarystore.xml file";
mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/artifactory;
cp -fv /tmp/etc/artifactory/binarystore.xml {{ .Values.artifactory.persistence.mountPath }}/etc/artifactory/binarystore.xml;
@ -511,6 +509,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name : JF_SHARED_NODE_HAENABLED
value: "true"
{{- with .Values.frontend.extraEnvironmentVariables }}
{{ tpl (toYaml .) $ | indent 8 }}
{{- end }}
@ -884,10 +884,10 @@ spec:
value: "false"
- name : JF_OBSERVABILITY_ENABLED
value: "false"
- name : JF_JFCONNECT_ENABLED
value: "true"
- name : JF_JFCONNECT_SERVICE_ENABLED
value: "false"
- name : JF_JFCONNECT_ENABLED
value: "true"
- name : JF_INTEGRATION_ENABLED
value: "false"
{{- end }}

View File

@ -149,37 +149,6 @@ spec:
- mountPath: {{ .Values.artifactory.persistence.mountPath | quote }}
name: volume
{{- end }}
- name: "remove-lost-found"
image: "{{ .Values.initContainerImage }}"
imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }}
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- NET_RAW
resources:
{{ toYaml .Values.initContainers.resources | indent 10 }}
command:
- 'bash'
- '-c'
- >
rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found;
rm -rfv {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}/lost+found;
rm -rfv {{ .Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}/lost+found;
volumeMounts:
- name: volume
mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
{{- if eq .Values.artifactory.persistence.type "file-system" }}
{{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }}
{{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }}
- name: artifactory-ha-data-{{ $sharedClaimNumber }}
mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}"
{{- end }}
- name: artifactory-ha-backup
mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}"
{{- end }}
{{- end }}
{{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }}
- name: "access-bootstrap-creds"
image: "{{ .Values.initContainerImage }}"
@ -239,8 +208,6 @@ spec:
{{- else }}
cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml;
{{- end }}
echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists";
rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found;
echo "Copy binarystore.xml file";
mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/artifactory;
cp -fv /tmp/etc/artifactory/binarystore.xml {{ .Values.artifactory.persistence.mountPath }}/etc/artifactory/binarystore.xml;
@ -632,6 +599,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name : JF_SHARED_NODE_HAENABLED
value: "true"
{{- with .Values.frontend.extraEnvironmentVariables }}
{{ tpl (toYaml .) $ | indent 8 }}
{{- end }}
@ -1017,10 +986,10 @@ spec:
value: "false"
- name : JF_OBSERVABILITY_ENABLED
value: "false"
- name : JF_JFCONNECT_ENABLED
value: "true"
- name : JF_JFCONNECT_SERVICE_ENABLED
value: "false"
- name : JF_JFCONNECT_ENABLED
value: "true"
- name : JF_INTEGRATION_ENABLED
value: "false"
{{- end }}

View File

@ -1,5 +1,6 @@
artifactory:
primary:
replicaCount: 4
resources:
requests:
memory: "6Gi"
@ -9,16 +10,4 @@ artifactory:
cpu: "8"
javaOpts:
xms: "6g"
xmx: "8g"
node:
replicaCount: 3
resources:
requests:
memory: "6Gi"
cpu: "4"
limits:
memory: "10Gi"
cpu: "8"
javaOpts:
xms: "6g"
xmx: "8g"
xmx: "8g"

View File

@ -1,5 +1,6 @@
artifactory:
primary:
replicaCount: 3
resources:
requests:
memory: "4Gi"
@ -9,16 +10,4 @@ artifactory:
cpu: "6"
javaOpts:
xms: "4g"
xmx: "6g"
node:
replicaCount: 2
resources:
requests:
memory: "4Gi"
cpu: "2"
limits:
memory: "8Gi"
cpu: "6"
javaOpts:
xms: "4g"
xmx: "6g"
xmx: "6g"

View File

@ -1,17 +1,6 @@
artifactory:
primary:
resources:
requests:
memory: "4Gi"
cpu: "2"
limits:
memory: "6Gi"
cpu: "4"
javaOpts:
xms: "4g"
xmx: "4g"
node:
replicaCount: 1
replicaCount: 2
resources:
requests:
memory: "4Gi"

View File

@ -41,7 +41,7 @@ global:
## String to fully override artifactory-ha.fullname template
##
# fullnameOverride:
initContainerImage: releases-docker.jfrog.io/ubi9/ubi-minimal:9.1.0.1760
initContainerImage: releases-docker.jfrog.io/ubi9/ubi-minimal:9.1.0.1793
installer:
type:
platform:
@ -147,7 +147,7 @@ postgresql:
image:
registry: releases-docker.jfrog.io
repository: bitnami/postgresql
tag: 13.9.0-debian-11-r11
tag: 13.10.0-debian-11-r14
postgresqlUsername: artifactory
postgresqlPassword: ""
postgresqlDatabase: artifactory
@ -202,7 +202,7 @@ logger:
image:
registry: releases-docker.jfrog.io
repository: ubi9/ubi-minimal
tag: 9.1.0.1760
tag: 9.1.0.1793
## You can use a pre-existing secret with keys license_token and iam_role by specifying licenseConfigSecretName
## Example : Create a generic secret using `kubectl create secret generic <secret-name> --from-literal=license_token=${TOKEN} --from-literal=iam_role=${ROLE_ARN}`
aws:
@ -217,7 +217,7 @@ router:
image:
registry: releases-docker.jfrog.io
repository: jfrog/router
tag: 7.61.3
tag: 7.67.0
imagePullPolicy: IfNotPresent
serviceRegistry:
## Service registry (Access) TLS verification skipped if enabled
@ -351,6 +351,11 @@ artifactory:
url: "Elasticsearch url where JFrog Insight is installed For example, http://<ip_address>:8082"
username: ""
password: ""
# Support for Cold Artifact Storage
# Set 'coldStorage.enabled' to 'true' only for the Artifactory instance that you are designating as the Cold instance
# Refer to https://jfrog.com/help/r/jfrog-platform-administration-documentation/setting-up-cold-artifact-storage
coldStorage:
enabled: false
# This directory is intended for use with NFS eventual configuration for HA
haDataDir:
enabled: false
@ -627,6 +632,10 @@ artifactory:
serviceRegistry:
insecure: {{ .Values.router.serviceRegistry.insecure }}
shared:
{{- if .Values.artifactory.coldStorage.enabled }}
jfrogColdStorage:
coldInstanceEnabled: true
{{- end }}
{{- if .Values.artifactory.openMetrics.enabled }}
metrics:
enabled: true
@ -638,7 +647,7 @@ artifactory:
consoleLog:
enabled: {{ .Values.artifactory.consoleLog }}
extraJavaOpts: >
-Dartifactory.graceful.shutdown.max.request.duration.millis={{ mul .Values.terminationGracePeriodSeconds 1000 }}
-Dartifactory.graceful.shutdown.max.request.duration.millis={{ mul .Values.artifactory.terminationGracePeriodSeconds 1000 }}
-Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }}
{{- with .Values.artifactory.primary.javaOpts }}
{{- if .corePoolSize }}
@ -746,6 +755,8 @@ artifactory:
{{- else }}
jfconnect:
enabled: false
jfconnect_service:
enabled: false
{{- end }}
## IMPORTANT: If overriding artifactory.internalPort:
## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024!
@ -829,6 +840,7 @@ artifactory:
## google-storage-v2
## aws-s3-v3
## s3-storage-v3-direct
## s3-storage-v3-archive
## azure-blob
## azure-blob-storage-direct
type: file-system
@ -1041,7 +1053,7 @@ artifactory:
## Set existingClaim to true or false
## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-primary-0`
existingClaim: false
replicaCount: 1
replicaCount: 3
# minAvailable: 1
updateStrategy:
@ -1087,7 +1099,7 @@ artifactory:
## Set existingClaim to true or false
## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-member-0`
existingClaim: false
replicaCount: 2
replicaCount: 0
updateStrategy:
type: RollingUpdate
minAvailable: 1
@ -1189,6 +1201,7 @@ frontend:
periodSeconds: 5
timeoutSeconds: {{ .Values.probes.timeoutSeconds }}
access:
enabled: true
## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file.
## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates
## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes.
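The new `artifactory.coldStorage.enabled` flag above feeds the `shared.jfrogColdStorage.coldInstanceEnabled` block of the generated system.yaml; per the inline comment, enable it only on the instance designated as the Cold instance. A minimal sketch:

```
artifactory:
  coldStorage:
    # Renders shared.jfrogColdStorage.coldInstanceEnabled: true in system.yaml
    enabled: true
```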

View File

@ -1,7 +1,7 @@
# JFrog Container Registry Chart Changelog
All changes to this chart will be documented in this file.
## [107.55.14] - Aug 25, 2022
## [107.59.9] - Aug 25, 2022
* Included the event service as mandatory and removed the flag from values.yaml
## [107.41.0] - Jul 22, 2022

View File

@ -4,11 +4,11 @@ annotations:
catalog.cattle.io/kube-version: '>= 1.14.0-0'
catalog.cattle.io/release-name: artifactory-jcr
apiVersion: v2
appVersion: 7.55.14
appVersion: 7.59.9
dependencies:
- name: artifactory
repository: file://./charts/artifactory
version: 107.55.14
version: 107.59.9
description: JFrog Container Registry
home: https://jfrog.com/container-registry/
icon: https://raw.githubusercontent.com/jfrog/charts/ea5c3112c24a973f64f3ccd99747323db292a369/stable/artifactory-jcr/logo/jcr-logo.png
@ -27,4 +27,4 @@ name: artifactory-jcr
sources:
- https://github.com/jfrog/charts
type: application
version: 107.55.14
version: 107.59.9

View File

@ -1,6 +1,20 @@
# JFrog Artifactory Chart Changelog
All changes to this chart will be documented in this file.
## [107.59.9] - May 8, 2023
* Fixed reference of `terminationGracePeriodSeconds`
* Added Support for Cold Artifact Storage as part of the systemYaml configuration (disabled by default)
* Added new binary provider `s3-storage-v3-archive`
* Fixed disabling of jfconnect as a micro-service on non-split-container setups
## [107.58.0] - Mar 23, 2023
* Updated postgresql multi-arch tag version to `13.10.0-debian-11-r14`
* Removed obsolete remove-lost-found initContainer
* Added env JF_SHARED_NODE_HAENABLED under frontend when running in the container split mode
## [107.57.0] - Mar 02, 2023
* Updated initContainerImage and logger image to `ubi9/ubi-minimal:9.1.0.1793`
## [107.55.0] - Jan 31, 2023
* Updated initContainerImage and logger image to `ubi9/ubi-minimal:9.1.0.1760`
* Adding a custom preStop to Artifactory router for allowing graceful termination to complete

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 7.55.14
appVersion: 7.59.9
dependencies:
- condition: postgresql.enabled
name: postgresql
@ -21,4 +21,4 @@ name: artifactory
sources:
- https://github.com/jfrog/charts
type: application
version: 107.55.14
version: 107.59.9

View File

@ -4,10 +4,6 @@ artifactory:
masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
persistence:
enabled: false
# Remove extraEnvironmentVariables 7.45.x onwards
extraEnvironmentVariables:
- name: JF_JFCONNECT_ENABLED
value: "false"
replicator:
enabled: true
ingress:

View File

@ -5,10 +5,6 @@ artifactory:
masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
persistence:
enabled: false
# Remove extraEnvironmentVariables 7.45.x onwards
extraEnvironmentVariables:
- name: JF_JFCONNECT_ENABLED
value: "false"
replicator:
enabled: true
ingress:

View File

@ -126,7 +126,7 @@
</provider>
</config>
{{- end }}
{{- if or (eq .Values.artifactory.persistence.type "aws-s3-v3") (eq .Values.artifactory.persistence.type "s3-storage-v3-direct") (eq .Values.artifactory.persistence.type "cluster-s3-storage-v3") }}
{{- if or (eq .Values.artifactory.persistence.type "aws-s3-v3") (eq .Values.artifactory.persistence.type "s3-storage-v3-direct") (eq .Values.artifactory.persistence.type "cluster-s3-storage-v3") (eq .Values.artifactory.persistence.type "s3-storage-v3-archive") }}
<!-- AWS S3 V3 -->
<config version="2">
{{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
@ -158,13 +158,22 @@
</provider>
</provider>
</chain>
{{- else if eq .Values.artifactory.persistence.type "s3-storage-v3-archive" }}
<!-- s3-storage-v3 default chain based on AWS S3 client for archive purposes -->
<chain> <!--template="s3-storage-v3-archive"-->
<provider id="s3-storage-v3-archive" type="s3-storage-v3-archive">
<provider id="s3-storage-v3" type="s3-storage-v3"/>
</provider>
</chain>
{{- end }}
{{- if or (eq .Values.artifactory.persistence.type "aws-s3-v3") (eq .Values.artifactory.persistence.type "s3-storage-v3-direct") (eq .Values.artifactory.persistence.type "cluster-s3-storage-v3") }}
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize | int64}}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "cluster-s3-storage-v3" }}
<provider id="sharding-cluster-eventual-s3" type="sharding-cluster">

View File

@ -308,6 +308,9 @@ Resolve requiredServiceTypes value
*/}}
{{- define "artifactory.router.requiredServiceTypes" -}}
{{- $requiredTypes := "jfrt,jfac" -}}
{{- if not .Values.access.enabled -}}
{{- $requiredTypes = "jfrt" -}}
{{- end -}}
{{- if .Values.observability.enabled -}}
{{- $requiredTypes = printf "%s,%s" $requiredTypes "jfob" -}}
{{- end -}}

View File

@ -128,24 +128,6 @@ spec:
- name: artifactory-volume
mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
{{- end }}
- name: "remove-lost-found"
image: "{{ .Values.initContainerImage }}"
imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }}
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- NET_RAW
resources:
{{ toYaml .Values.initContainers.resources | indent 10 }}
command:
- 'bash'
- '-c'
- 'rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found {{ .Values.artifactory.persistence.mountPath }}/data/.lock'
volumeMounts:
- name: artifactory-volume
mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
{{- end }}
{{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }}
- name: "access-bootstrap-creds"
@ -205,8 +187,6 @@ spec:
{{- else }}
cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml;
{{- end }}
echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists";
rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found;
echo "Copy binarystore.xml file";
mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/artifactory;
cp -fv /tmp/etc/artifactory/binarystore.xml {{ .Values.artifactory.persistence.mountPath }}/etc/artifactory/binarystore.xml;
@ -594,6 +574,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
{{- if and (gt (.Values.artifactory.replicaCount | int64) 1) (eq (include "artifactory.isImageProType" .) "true") (eq (include "artifactory.isUsingDerby" .) "false") }}
- name : JF_SHARED_NODE_HAENABLED
value: "true"
{{- end }}
{{- with .Values.frontend.extraEnvironmentVariables }}
{{ tpl (toYaml .) $ | indent 8 }}
{{- end }}

View File

@ -42,7 +42,7 @@ global:
## String to fully override artifactory.fullname template
##
# fullnameOverride:
initContainerImage: releases-docker.jfrog.io/ubi9/ubi-minimal:9.1.0.1760
initContainerImage: releases-docker.jfrog.io/ubi9/ubi-minimal:9.1.0.1793
# Init containers
initContainers:
resources:
@ -161,7 +161,7 @@ logger:
image:
registry: releases-docker.jfrog.io
repository: ubi9/ubi-minimal
tag: 9.1.0.1760
tag: 9.1.0.1793
## You can use a pre-existing secret with keys license_token and iam_role by specifying licenseConfigSecretName
## Example : Create a generic secret using `kubectl create secret generic <secret-name> --from-literal=license_token=${TOKEN} --from-literal=iam_role=${ROLE_ARN}`
aws:
@ -176,7 +176,7 @@ router:
image:
registry: releases-docker.jfrog.io
repository: jfrog/router
tag: 7.61.3
tag: 7.67.0
imagePullPolicy: IfNotPresent
serviceRegistry:
## Service registry (Access) TLS verification skipped if enabled
@ -327,6 +327,11 @@ artifactory:
url: "Elasticsearch url where JFrog Insight is installed For example, http://<ip_address>:8082"
username: ""
password: ""
# Support for Cold Artifact Storage
# Set 'coldStorage.enabled' to 'true' only for the Artifactory instance that you are designating as the Cold instance
# Refer to https://jfrog.com/help/r/jfrog-platform-administration-documentation/setting-up-cold-artifact-storage
coldStorage:
enabled: false
# This directory is intended for use with NFS eventual configuration for HA
haDataDir:
enabled: false
@ -598,6 +603,10 @@ artifactory:
serviceRegistry:
insecure: {{ .Values.router.serviceRegistry.insecure }}
shared:
{{- if .Values.artifactory.coldStorage.enabled }}
jfrogColdStorage:
coldInstanceEnabled: true
{{- end }}
{{- if .Values.artifactory.openMetrics.enabled }}
metrics:
enabled: true
@ -609,7 +618,7 @@ artifactory:
consoleLog:
enabled: {{ .Values.artifactory.consoleLog }}
extraJavaOpts: >
-Dartifactory.graceful.shutdown.max.request.duration.millis={{ mul .Values.terminationGracePeriodSeconds 1000 }}
-Dartifactory.graceful.shutdown.max.request.duration.millis={{ mul .Values.artifactory.terminationGracePeriodSeconds 1000 }}
-Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }}
{{- with .Values.artifactory.javaOpts }}
{{- if .corePoolSize }}
@ -711,6 +720,8 @@ artifactory:
{{- else }}
jfconnect:
enabled: false
jfconnect_service:
enabled: false
{{- end }}
annotations: {}
service:
@ -868,6 +879,7 @@ artifactory:
## aws-s3-v3
## s3-storage-v3-direct
## cluster-s3-storage-v3
## s3-storage-v3-archive
## azure-blob
## azure-blob-storage-direct
## cluster-azure-blob-storage
@ -1059,6 +1071,7 @@ frontend:
periodSeconds: 5
timeoutSeconds: {{ .Values.probes.timeoutSeconds }}
access:
enabled: true
## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file.
## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates
## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes.
@ -1714,7 +1727,7 @@ postgresql:
image:
registry: releases-docker.jfrog.io
repository: bitnami/postgresql
tag: 13.9.0-debian-11-r11
tag: 13.10.0-debian-11-r14
postgresqlUsername: artifactory
postgresqlPassword: ""
postgresqlDatabase: artifactory

View File

@ -67,4 +67,4 @@ postgresql:
enabled: true
router:
image:
tag: 7.61.3
tag: 7.67.0

View File

@ -7,7 +7,7 @@ annotations:
catalog.cattle.io/featured: "2"
catalog.cattle.io/release-name: cost-analyzer
apiVersion: v2
appVersion: 1.103.3
appVersion: 1.103.4
dependencies:
- condition: global.grafana.enabled
name: grafana
@ -25,4 +25,4 @@ description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to moni
cloud costs.
icon: https://partner-charts.rancher.io/assets/logos/kubecost.png
name: cost-analyzer
version: 1.103.3
version: 1.103.4

View File

@ -1,5 +1,5 @@
{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostDeployment) (.Values.kubecostDeployment.queryServiceReplicas) }}
{{- if gt .Values.kubecostDeployment.queryServiceReplicas 0 }}
{{- if gt (.Values.kubecostDeployment.queryServiceReplicas | toString | atoi) 0 }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
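This guard change, repeated in the Role, Deployment, ServiceAccount and Service templates below, appears to work around Helm parsing unquoted YAML numbers as float64, which the template `gt` cannot compare against the integer literal `0`; piping through `toString | atoi` normalizes both numeric and quoted values. Either form should now pass the guard:

```
kubecostDeployment:
  queryServiceReplicas: 1     # unquoted number (parsed as float64 by Helm)
  # queryServiceReplicas: "1" # a quoted string also works via toString | atoi
```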

View File

@ -1,5 +1,5 @@
{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostDeployment) (.Values.kubecostDeployment.queryServiceReplicas) }}
{{- if gt .Values.kubecostDeployment.queryServiceReplicas 0 }}
{{- if gt (.Values.kubecostDeployment.queryServiceReplicas | toString | atoi) 0 }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:

View File

@ -1,5 +1,5 @@
{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostDeployment) (.Values.kubecostDeployment.queryServiceReplicas) }}
{{- if gt .Values.kubecostDeployment.queryServiceReplicas 0 }}
{{- if gt (.Values.kubecostDeployment.queryServiceReplicas | toString | atoi) 0 }}
apiVersion: apps/v1
kind: Deployment
metadata:

View File

@ -1,5 +1,5 @@
{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostDeployment) (.Values.kubecostDeployment.queryServiceReplicas) }}
{{- if gt .Values.kubecostDeployment.queryServiceReplicas 0 }}
{{- if gt (.Values.kubecostDeployment.queryServiceReplicas | toString | atoi) 0 }}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount

View File

@ -1,5 +1,5 @@
{{- if and (not .Values.agent) (not .Values.cloudAgent) (.Values.kubecostDeployment) (.Values.kubecostDeployment.queryServiceReplicas) }}
{{- if gt .Values.kubecostDeployment.queryServiceReplicas 0 }}
{{- if gt (.Values.kubecostDeployment.queryServiceReplicas | toString | atoi) 0 }}
kind: Service
apiVersion: v1
metadata:

View File

@ -1,7 +1,7 @@
dependencies:
- name: newrelic-infrastructure
repository: https://newrelic.github.io/nri-kubernetes
version: 3.17.0
version: 3.18.0
- name: nri-prometheus
repository: https://newrelic.github.io/nri-prometheus
version: 2.1.16
@ -19,7 +19,7 @@ dependencies:
version: 4.23.0
- name: nri-kube-events
repository: https://newrelic.github.io/nri-kube-events
version: 3.0.0
version: 3.1.0
- name: newrelic-logging
repository: https://newrelic.github.io/helm-charts
version: 1.14.1
@ -28,9 +28,9 @@ dependencies:
version: 2.1.0
- name: pixie-operator-chart
repository: https://pixie-operator-charts.storage.googleapis.com
version: 0.1.0
version: 0.1.1
- name: newrelic-infra-operator
repository: https://newrelic.github.io/newrelic-infra-operator
version: 2.2.0
digest: sha256:0a1f51c3e283d5a873e74ef4994b8569b6b85a9d46adc7514447d4ce2399146d
generated: "2023-05-16T17:44:36.052686458Z"
digest: sha256:59c20ed37a61cdadda7123c0dc810b094bf93ccff6d9401fdd38c80c0c227fc7
generated: "2023-05-26T23:54:59.043852795Z"

View File

@ -7,7 +7,7 @@ dependencies:
- condition: infrastructure.enabled,newrelic-infrastructure.enabled
name: newrelic-infrastructure
repository: file://./charts/newrelic-infrastructure
version: 3.17.0
version: 3.18.0
- condition: prometheus.enabled,nri-prometheus.enabled
name: nri-prometheus
repository: file://./charts/nri-prometheus
@ -31,7 +31,7 @@ dependencies:
- condition: kubeEvents.enabled,nri-kube-events.enabled
name: nri-kube-events
repository: file://./charts/nri-kube-events
version: 3.0.0
version: 3.1.0
- condition: logging.enabled,newrelic-logging.enabled
name: newrelic-logging
repository: file://./charts/newrelic-logging
@ -44,7 +44,7 @@ dependencies:
condition: pixie-chart.enabled
name: pixie-operator-chart
repository: file://./charts/pixie-operator-chart
version: 0.1.0
version: 0.1.1
- condition: newrelic-infra-operator.enabled
name: newrelic-infra-operator
repository: file://./charts/newrelic-infra-operator
@ -89,4 +89,4 @@ sources:
- https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-logging
- https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-pixie
- https://github.com/newrelic/newrelic-infra-operator/tree/master/charts/newrelic-infra-operator
version: 5.0.15
version: 5.0.16

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 3.12.0
appVersion: 3.13.0
dependencies:
- name: common-library
repository: https://helm-charts.newrelic.com
@ -35,4 +35,4 @@ sources:
- https://github.com/newrelic/nri-kubernetes/
- https://github.com/newrelic/nri-kubernetes/tree/main/charts/newrelic-infrastructure
- https://github.com/newrelic/infrastructure-agent/
version: 3.17.0
version: 3.18.0

View File

@ -184,6 +184,8 @@ spec:
{{- if include "newrelic.common.privileged" . }}
- name: dev
mountPath: /dev
- name: host-containerd-socket
mountPath: /run/containerd/containerd.sock
- name: host-docker-socket
mountPath: /var/run/docker.sock
- name: log
@ -209,6 +211,9 @@ spec:
- name: dev
hostPath:
path: /dev
- name: host-containerd-socket
hostPath:
path: /run/containerd/containerd.sock
- name: host-docker-socket
hostPath:
path: /var/run/docker.sock
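
These hostPath mounts are gated on the agent running privileged; a hedged values sketch (assuming the bundle's standard `privileged` toggle) that activates them:

```
newrelic-infrastructure:
  privileged: true   # enables the /dev, docker.sock, and containerd.sock mounts above
```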

View File

@ -23,14 +23,14 @@ images:
forwarder:
registry: ""
repository: newrelic/k8s-events-forwarder
tag: 1.41.0
tag: 1.42.1
pullPolicy: IfNotPresent
# -- Image for the New Relic Infrastructure Agent plus integrations.
# @default -- See `values.yaml`
agent:
registry: ""
repository: newrelic/infrastructure-bundle
tag: 3.1.8
tag: 3.2.2
pullPolicy: IfNotPresent
# -- Image for the New Relic Kubernetes integration.
# @default -- See `values.yaml`

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 2.0.0
appVersion: 2.1.0
dependencies:
- name: common-library
repository: https://helm-charts.newrelic.com
@ -35,4 +35,4 @@ sources:
- https://github.com/newrelic/nri-kube-events/
- https://github.com/newrelic/nri-kube-events/tree/main/charts/nri-kube-events
- https://github.com/newrelic/infrastructure-agent/
version: 3.0.0
version: 3.1.0

View File

@ -8,16 +8,35 @@ metadata:
rules:
- apiGroups:
- ""
resources:
- events
- namespaces
- nodes
- jobs
- persistentvolumes
- persistentvolumeclaims
- pods
- services
verbs:
- get
- watch
- list
- apiGroups:
- apps
resources:
- "daemonsets"
- "events"
- "namespaces"
- "nodes"
- "pods"
- "services"
- daemonsets
- deployments
verbs:
- "get"
- "watch"
- "list"
- get
- watch
- list
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- get
- watch
- list
{{- end -}}

View File

@ -27,7 +27,7 @@ images:
agent:
registry:
repository: newrelic/k8s-events-forwarder
tag: 1.41.0
tag: 1.42.1
pullPolicy: IfNotPresent
# -- The secrets that are needed to pull images from a custom registry.
pullSecrets: []

View File

@ -1,4 +1,4 @@
apiVersion: v2
name: pixie-operator-chart
type: application
version: 0.1.0
version: 0.1.1

View File

@ -162,6 +162,27 @@ spec:
description: Resources is the resource requirements for a container.
This field cannot be updated once the cluster is created.
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry in
pod.spec.resourceClaims of the Pod where this field
is used. It makes that resource available inside a
container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:

View File

@ -3,6 +3,12 @@ kind: CatalogSource
metadata:
name: pixie-operator-index
namespace: {{ .Values.olmOperatorNamespace }}
{{- if .Values.olmCatalogSource.annotations }}
annotations: {{ .Values.olmCatalogSource.annotations | toYaml | nindent 4 }}
{{- end }}
{{- if .Values.olmCatalogSource.labels }}
labels: {{ .Values.olmCatalogSource.labels | toYaml | nindent 4 }}
{{- end }}
spec:
sourceType: grpc
image: {{ if .Values.registry }}{{ .Values.registry }}/gcr.io-pixie-oss-pixie-prod-operator-bundle_index:0.0.1{{ else }}gcr.io/pixie-oss/pixie-prod/operator/bundle_index:0.0.1{{ end }}
@ -11,3 +17,21 @@ spec:
updateStrategy:
registryPoll:
interval: 10m
grpcPodConfig:
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "amd64"
effect: "NoSchedule"
- key: "kubernetes.io/arch"
operator: "Equal"
value: "amd64"
effect: "NoExecute"
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoExecute"

View File

@ -19,7 +19,7 @@ spec:
fieldPath: metadata.namespace
- name: PL_VIZIER_NAME
value: '{{ .Values.name }}'
image: gcr.io/pixie-oss/pixie-prod/operator/vizier_deleter:0.1.0
image: gcr.io/pixie-oss/pixie-prod/operator/vizier_deleter:0.1.1
name: delete-job
restartPolicy: Never
serviceAccountName: pl-deleter-service-account

View File

@ -15,7 +15,12 @@ olmOperatorNamespace: "px-operator"
# The bundle channel which OLM should listen to for the Vizier operator bundles.
# Should be "stable" for production-versions of the operator, and "test" for release candidates.
olmBundleChannel: "stable"
# Optional annotations and labels for CatalogSource.
olmCatalogSource:
# Optional custom annotations to add to the deployed pods managed by the CatalogSource object.
annotations: {}
# Optional custom labels to add to the deployed pods managed by the CatalogSource object.
labels: {}
## Vizier configuration
# The name of the Vizier instance deployed to the cluster.
name: "pixie"

View File

@ -13,15 +13,15 @@ dependencies:
version: 3.4.0
- name: zfs-localpv
repository: https://openebs.github.io/zfs-localpv
version: 2.1.0
version: 2.2.0
- name: lvm-localpv
repository: https://openebs.github.io/lvm-localpv
version: 1.0.1
version: 1.1.0
- name: nfs-provisioner
repository: https://openebs.github.io/dynamic-nfs-provisioner
version: 0.10.0
- name: mayastor
repository: https://openebs.github.io/mayastor-extensions
version: 2.1.0
digest: sha256:7a5581f9f69600f76a026edd6057b40b598d989b7e8f4852409ba1f285777392
generated: "2023-04-26T18:11:53.841045084Z"
version: 2.2.0
digest: sha256:dccfd161dbdb5a0fcbc66006cc2c9f174a0e614d11bb79d3b52ef884395e57a7
generated: "2023-05-29T11:58:29.349483874Z"

View File

@ -3,7 +3,7 @@ annotations:
catalog.cattle.io/display-name: OpenEBS
catalog.cattle.io/release-name: openebs
apiVersion: v2
appVersion: 3.6.0
appVersion: 3.7.0
dependencies:
- condition: openebs-ndm.enabled
name: openebs-ndm
@ -24,11 +24,11 @@ dependencies:
- condition: zfs-localpv.enabled
name: zfs-localpv
repository: file://./charts/zfs-localpv
version: 2.1.0
version: 2.2.0
- condition: lvm-localpv.enabled
name: lvm-localpv
repository: file://./charts/lvm-localpv
version: 1.0.1
version: 1.1.0
- condition: nfs-provisioner.enabled
name: nfs-provisioner
repository: file://./charts/nfs-provisioner
@ -36,7 +36,7 @@ dependencies:
- condition: mayastor.enabled
name: mayastor
repository: file://./charts/mayastor
version: 2.1.0
version: 2.2.0
description: Containerized Attached Storage for Kubernetes
home: http://www.openebs.io/
icon: https://raw.githubusercontent.com/cncf/artwork/HEAD/projects/openebs/icon/color/openebs-icon-color.png
@ -58,4 +58,4 @@ maintainers:
name: openebs
sources:
- https://github.com/openebs/openebs
version: 3.6.0
version: 3.7.0

View File

@ -148,7 +148,7 @@ The following table lists the common configurable parameters of the OpenEBS char
| `mayastor.etcd.persistence.size` | Set the size of the volume(s) used by the etcd | `""` |
| `mayastor.image.registry` | Set the container image registry for the mayastor containers | `"docker.io"` |
| `mayastor.image.repo` | Set the container image repository for the mayastor containers | `"openebs"` |
| `mayastor.image.tag` | Set the container image tag for the mayastor containers | `"v2.1.0"` |
| `mayastor.image.tag` | Set the container image tag for the mayastor containers | `"v2.2.0"` |
| `mayastor.image.pullPolicy` | Set the container ImagePullPolicy for the mayastor containers | `"Always"` |
| `mayastor.csi.image.registry` | Set the container image registry for the Kubernetes CSI sidecar containers | `"registry.k8s.io"` |
| `mayastor.csi.image.repo` | Set the container image repository for the Kubernetes CSI sidecar containers | `"sig-storage"` |

View File

@ -1,7 +1,7 @@
apiVersion: v2
appVersion: 1.0.0
appVersion: 1.1.0
description: CSI Driver for dynamic provisioning of LVM Persistent Local Volumes.
home: http://www.openebs.io/
home: https://openebs.io/
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/openebs/icon/color/openebs-icon-color.png
keywords:
- cloud-native-storage
@ -20,4 +20,4 @@ maintainers:
name: lvm-localpv
sources:
- https://github.com/openebs/lvm-localpv
version: 1.0.1
version: 1.1.0

View File

@ -100,10 +100,10 @@ helm install openebs-lvmlocalpv openebs-lvmlocalpv/lvm-localpv --namespace opene
| `lvmPlugin.image.registry`| Registry for openebs-lvm-plugin image| `""`|
| `lvmPlugin.image.repository`| Image repository for openebs-lvm-plugin| `openebs/lvm-driver`|
| `lvmPlugin.image.pullPolicy`| Image pull policy for openebs-lvm-plugin| `IfNotPresent`|
| `lvmPlugin.image.tag`| Image tag for openebs-lvm-plugin| `1.0.0`|
| `lvmPlugin.image.tag`| Image tag for openebs-lvm-plugin| `1.1.0`|
| `lvmPlugin.metricsPort`| The TCP port number used for exposing lvm-metrics | `9500`|
| `lvmPlugin.allowedTopologies`| The comma-separated list of allowed node topologies | `kubernetes.io/hostname,`|
| `lvmNode.driverRegistrar.image.registry`| Registry for csi-node-driver-registrar image| `k8s.gcr.io/`|
| `lvmNode.driverRegistrar.image.registry`| Registry for csi-node-driver-registrar image| `registry.k8s.io/`|
| `lvmNode.driverRegistrar.image.repository`| Image repository for csi-node-driver-registrar| `sig-storage/csi-node-driver-registrar`|
| `lvmNode.driverRegistrar.image.pullPolicy`| Image pull policy for csi-node-driver-registrar| `IfNotPresent`|
| `lvmNode.driverRegistrar.image.tag`| Image tag for csi-node-driver-registrar| `v2.3.0`|
@ -117,19 +117,19 @@ helm install openebs-lvmlocalpv openebs-lvmlocalpv/lvm-localpv --namespace opene
| `lvmNode.nodeSelector`| Nodeselector for lvmnode daemonset pods| `""`|
| `lvmNode.tolerations` | lvmnode daemonset's pod toleration values | `""`|
| `lvmNode.securityContext` | Security context for lvmnode daemonset container | `""`|
| `lvmController.resizer.image.registry`| Registry for csi-resizer image| `k8s.gcr.io/`|
| `lvmController.resizer.image.registry`| Registry for csi-resizer image| `registry.k8s.io/`|
| `lvmController.resizer.image.repository`| Image repository for csi-resizer| `sig-storage/csi-resizer`|
| `lvmController.resizer.image.pullPolicy`| Image pull policy for csi-resizer| `IfNotPresent`|
| `lvmController.resizer.image.tag`| Image tag for csi-resizer| `v1.2.0`|
| `lvmController.snapshotter.image.registry`| Registry for csi-snapshotter image| `k8s.gcr.io/`|
| `lvmController.snapshotter.image.registry`| Registry for csi-snapshotter image| `registry.k8s.io/`|
| `lvmController.snapshotter.image.repository`| Image repository for csi-snapshotter| `sig-storage/csi-snapshotter`|
| `lvmController.snapshotter.image.pullPolicy`| Image pull policy for csi-snapshotter| `IfNotPresent`|
| `lvmController.snapshotter.image.tag`| Image tag for csi-snapshotter| `v4.0.0`|
| `lvmController.snapshotController.image.registry`| Registry for snapshot-controller image| `k8s.gcr.io/`|
| `lvmController.snapshotController.image.registry`| Registry for snapshot-controller image| `registry.k8s.io/`|
| `lvmController.snapshotController.image.repository`| Image repository for snapshot-controller| `sig-storage/snapshot-controller`|
| `lvmController.snapshotController.image.pullPolicy`| Image pull policy for snapshot-controller| `IfNotPresent`|
| `lvmController.snapshotController.image.tag`| Image tag for snapshot-controller| `v4.0.0`|
| `lvmController.provisioner.image.registry`| Registry for csi-provisioner image| `k8s.gcr.io/`|
| `lvmController.provisioner.image.registry`| Registry for csi-provisioner image| `registry.k8s.io/`|
| `lvmController.provisioner.image.repository`| Image repository for csi-provisioner| `sig-storage/csi-provisioner`|
| `lvmController.provisioner.image.pullPolicy`| Image pull policy for csi-provisioner| `IfNotPresent`|
| `lvmController.provisioner.image.tag`| Image tag for csi-provisioner| `v2.3.0`|

View File

@ -131,6 +131,8 @@ spec:
args :
- "--endpoint=$(OPENEBS_CSI_ENDPOINT)"
- "--plugin=$(OPENEBS_CONTROLLER_DRIVER)"
- "--kube-api-qps={{ .Values.lvmController.kubeClientRateLimiter.qps }}"
- "--kube-api-burst={{ .Values.lvmController.kubeClientRateLimiter.burst }}"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
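
The two new flags are fed from values; a sketch with illustrative rate-limiter figures (the key path matches the template above; the numbers are assumptions):

```
lvmController:
  kubeClientRateLimiter:
    qps: 20     # illustrative sustained client QPS against the API server
    burst: 40   # illustrative short-term burst allowance
```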

Some files were not shown because too many files have changed in this diff.