Charts CI

```
Updated:
  bitnami/redis:
    - 17.11.4
  bitnami/tomcat:
    - 10.9.3
  btp/chronicle:
    - 0.1.16
  clastix/kamaji:
    - 0.12.1
  cockroach-labs/cockroachdb:
    - 11.0.2
  datadog/datadog:
    - 3.32.2
  instana/instana-agent:
    - 1.2.60
  percona/pxc-db:
    - 1.12.3
  percona/pxc-operator:
    - 1.12.2
  redpanda/redpanda:
    - 4.0.33
  speedscale/speedscale-operator:
    - 1.3.13
  sysdig/sysdig:
    - 1.15.92
```
pull/794/head
github-actions[bot] 2023-06-14 14:16:58 +00:00
parent b3e5ea9f76
commit 2df1c07e6c
62 changed files with 624 additions and 782 deletions
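One way a reviewer can sanity-check a bump from the list above is to query the published chart index after the CI run lands. A sketch; the repository alias and URL are placeholders, not something this commit defines:

```
# Hypothetical repo alias/URL; substitute the repository these charts are published to.
helm repo add charts https://charts.example.com
helm repo update

# List the versions the index now advertises for one of the bumped charts.
helm search repo charts/redis --versions | head -n 5
```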


@@ -28,4 +28,4 @@ maintainers:
 name: redis
 sources:
 - https://github.com/bitnami/charts/tree/main/bitnami/redis
-version: 17.11.3
+version: 17.11.4

@@ -366,15 +366,15 @@ The command removes all the Kubernetes components associated with the chart and
 | `sentinel.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
 | `sentinel.livenessProbe.enabled` | Enable livenessProbe on Redis® Sentinel nodes | `true` |
 | `sentinel.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` |
-| `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
+| `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
 | `sentinel.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
-| `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` |
+| `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
 | `sentinel.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
 | `sentinel.readinessProbe.enabled` | Enable readinessProbe on Redis® Sentinel nodes | `true` |
 | `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` |
 | `sentinel.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
 | `sentinel.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` |
-| `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` |
+| `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
 | `sentinel.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
 | `sentinel.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
 | `sentinel.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |

@@ -937,7 +937,7 @@ kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remo
 ## License
-Copyright © 2023 Bitnami
+Copyright © 2023 VMware, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -1113,10 +1113,10 @@ sentinel:
   livenessProbe:
     enabled: true
     initialDelaySeconds: 20
-    periodSeconds: 5
+    periodSeconds: 10
     timeoutSeconds: 5
     successThreshold: 1
-    failureThreshold: 5
+    failureThreshold: 6
   ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes
   ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
   ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe

@@ -1130,7 +1130,7 @@ sentinel:
     periodSeconds: 5
     timeoutSeconds: 1
     successThreshold: 1
-    failureThreshold: 5
+    failureThreshold: 6
   ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one
   ##
   customStartupProbe: {}
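The new sentinel defaults are more forgiving (slower liveness period, one extra failure before a restart). Deployments that want the old cadence can set it back explicitly; a sketch using the parameter names from the table above, with an illustrative release name:

```
helm upgrade --install my-redis bitnami/redis \
  --set sentinel.enabled=true \
  --set sentinel.livenessProbe.periodSeconds=5 \
  --set sentinel.livenessProbe.failureThreshold=5 \
  --set sentinel.readinessProbe.failureThreshold=5
```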

@@ -6,7 +6,7 @@ annotations:
   category: ApplicationServer
   licenses: Apache-2.0
 apiVersion: v2
-appVersion: 10.1.9
+appVersion: 10.1.10
 dependencies:
 - name: common
   repository: file://./charts/common

@@ -31,4 +31,4 @@ maintainers:
 name: tomcat
 sources:
 - https://github.com/bitnami/charts/tree/main/bitnami/tomcat
-version: 10.9.2
+version: 10.9.3

@@ -76,10 +76,10 @@ The command removes all the Kubernetes components associated with the chart and
 ### Tomcat parameters
 | Name | Description | Value |
-| ----------------------------- | ------------------------------------------------------------------------------------------------------ | --------------------- |
+| ----------------------------- | ------------------------------------------------------------------------------------------------------ | ---------------------- |
 | `image.registry` | Tomcat image registry | `docker.io` |
 | `image.repository` | Tomcat image repository | `bitnami/tomcat` |
-| `image.tag` | Tomcat image tag (immutable tags are recommended) | `10.1.9-debian-11-r4` |
+| `image.tag` | Tomcat image tag (immutable tags are recommended) | `10.1.10-debian-11-r0` |
 | `image.digest` | Tomcat image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
 | `image.pullPolicy` | Tomcat image pull policy | `IfNotPresent` |
 | `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |

@@ -201,7 +201,7 @@ The command removes all the Kubernetes components associated with the chart and
 | `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory | `false` |
 | `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
 | `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
-| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r122` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r125` |
 | `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
 | `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
 | `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |

@@ -216,7 +216,7 @@ The command removes all the Kubernetes components associated with the chart and
 | `metrics.jmx.catalinaOpts` | custom option used to enabled JMX on tomcat jvm evaluated as template | `-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=5555 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=true` |
 | `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` |
 | `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` |
-| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.18.0-debian-11-r26` |
+| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.18.0-debian-11-r29` |
 | `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
 | `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` |
 | `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |

@@ -382,7 +382,7 @@ kubectl patch deployment tomcat --type=json -p='[{"op": "remove", "path": "/spec
 ## License
-Copyright © 2023 VMware Inc
+Copyright © 2023 VMware, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -58,7 +58,7 @@ extraDeploy: []
 image:
   registry: docker.io
   repository: bitnami/tomcat
-  tag: 10.1.9-debian-11-r4
+  tag: 10.1.10-debian-11-r0
   digest: ""
   ## Specify a imagePullPolicy
   ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

@@ -582,7 +582,7 @@ volumePermissions:
   image:
     registry: docker.io
     repository: bitnami/bitnami-shell
-    tag: 11-debian-11-r122
+    tag: 11-debian-11-r125
     digest: ""
     pullPolicy: IfNotPresent
     ## Optionally specify an array of imagePullSecrets.

@@ -642,7 +642,7 @@ metrics:
   image:
     registry: docker.io
     repository: bitnami/jmx-exporter
-    tag: 0.18.0-debian-11-r26
+    tag: 0.18.0-debian-11-r29
     digest: ""
     ## Specify a imagePullPolicy
     ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

@@ -22,4 +22,4 @@ keywords:
 - blockchain
 name: chronicle
 type: application
-version: 0.1.15
+version: 0.1.16

@@ -55,21 +55,65 @@ chronicle: {{ include "common.names.fullname" . }}
 {{ include "common.names.fullname" . }}-test-id-provider
 {{- end -}}
-{{- define "chronicle.jwksUrl" -}}
-{{- if .Values.auth.jwks.url -}}
-{{ .Values.auth.jwks.url }}
-{{- else -}}
-{{- if .Values.devIdProvider.enabled -}}
-http://{{ include "chronicle.id-provider.service" . }}:8090/jwks
-{{- else -}}
-{{ required "devIdProvider.enabled must be true or auth.jwks.url must be set!" .Values.auth.jwks.url }}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-{{- define "chronicle.userinfoUrl" -}}
-{{ .Values.auth.userinfo.url }}
-{{- end -}}
+{{- define "chronicle.id-provider.service.url" -}}
+http://{{ include "chronicle.id-provider.service" . }}:8090/jwks
+{{- end -}}
+{{- define "chronicle.id-claims" -}}
+{{- if .Values.auth.id.claims -}}
+--id-claims {{ .Values.auth.id.claims }} \
+{{- else -}}
+{{- /* Do nothing */ -}}
+{{- end -}}
+{{- end -}}
+{{/* The JWKS and userinfo URLs are connected. */}}
+{{/* If either is provided Chronicle will use the user-provided options. */}}
+{{/* If neither is provided Chronicle should fall back to using the 'devIdProvider'.*/}}
+{{- define "chronicle.jwks-url.url" -}}
+{{- if or (.Values.auth.jwks.url) (.Values.auth.userinfo.url) -}}
+{{- if .Values.auth.jwks.url -}}
+{{ .Values.auth.jwks.url }}
+{{- end -}}
+{{- else -}}
+{{- if .Values.devIdProvider.enabled -}}
+{{ include "chronicle.id-provider.service.url" . }}
+{{- else -}}
+{{/* Do nothing */}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- define "chronicle.jwks-url.cli" -}}
+{{- if or (.Values.auth.jwks.url) (.Values.devIdProvider.enabled) -}}
+--jwks-address {{ include "chronicle.jwks-url.url" . }} \
+{{- end -}}
+{{/* Do nothing */}}
+{{- end -}}
+{{/* The JWKS and userinfo URLs are connected. */}}
+{{/* If either is provided Chronicle will use the user-provided options. */}}
+{{/* If neither is provided Chronicle should fall back to using the 'devIdProvider'.*/}}
+{{- define "chronicle.userinfo-url" -}}
+{{- if or (.Values.auth.jwks.url) (.Values.auth.userinfo.url) -}}
+{{- if .Values.auth.userinfo.url -}}
+{{ .Values.auth.userinfo.url }}
+{{- end -}}
+{{- else -}}
+{{- if .Values.devIdProvider.enabled -}}
+{{ include "chronicle.id-provider.service.url" . }}
+{{- else -}}
+{{/* Do nothing */}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- define "chronicle.userinfo-url.cli" -}}
+{{- if or (.Values.auth.userinfo.url) (.Values.devIdProvider.enabled) -}}
+--userinfo-address {{ include "chronicle.userinfo-url" . }} \
+{{- end -}}
+{{/* Do nothing */}}
+{{- end -}}
 {{- define "chronicle.root-key.secret" -}}
 {{ include "common.names.fullname" . }}-root-key
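The comment block in the new helpers encodes the fallback rules; read against the values file, the three configurations look roughly like this (a hand-written sketch of user-supplied values with a hypothetical issuer URL, not chart defaults):

```
# Case 1 - explicit endpoints: the *.cli helpers emit --jwks-address/--userinfo-address as given.
auth:
  required: true
  jwks:
    url: https://idp.example.com/jwks          # hypothetical issuer
  userinfo:
    url: https://idp.example.com/userinfo      # hypothetical issuer

# Case 2 - neither URL set and devIdProvider.enabled=true: both helpers fall back to
# http://<fullname>-test-id-provider:8090/jwks via "chronicle.id-provider.service.url".
#
# Case 3 - neither URL set and devIdProvider disabled: the helpers emit nothing, and
# the deployment template's `required` guard fails the render when auth.required=true.
```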

@@ -104,7 +104,7 @@ spec:
   - |
     {{ if .Values.auth.required }}
     {{ if and (not .Values.auth.jwks.url) (not .Values.auth.userinfo.url) (not .Values.devIdProvider.enabled) }}
-    {{ required "If auth.required you need to provide at least auth.jwks.url or auth.userinfo.url" .Values.auth.jwks.url }}
+    {{ required "If 'auth.required' you need to provide at least 'auth.jwks.url' or 'auth.userinfo.url', or 'devIdProvider.enabled' must be 'true'!" .Values.auth.jwks.url }}
     {{ end }}
     {{ end }}

@@ -125,14 +125,10 @@ spec:
     --interface 0.0.0.0:{{ .Values.port }} \
     {{- if .Values.auth.required }}
     --require-auth \
-    --id-claims {{ .Values.auth.id.claims }} \
-    {{- if .Values.auth.jwks.enabled }}
-    --jwks-address {{ include "chronicle.jwksUrl" . }} \
-    {{- end }}
-    {{- if .Values.auth.userinfo.url }}
-    --userinfo-address {{ include "chronicle.userinfoUrl" . }} \
-    {{- end }}
     {{- end }}
+    {{ include "chronicle.jwks-url.cli" . }}
+    {{ include "chronicle.userinfo-url.cli" . }}
+    {{ include "chronicle.id-claims" . }}
     ;
     env: {{ include "lib.safeToYaml" .Values.env | nindent 12 }}
     - name: RUST_LOG
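For case 2 above (dev id-provider, no user-supplied URLs), hand-rendering the three `include`s for a hypothetical release named `chronicle` would give argument lines roughly like the following; the port value is illustrative:

```
--interface 0.0.0.0:9982 \
--require-auth \
--jwks-address http://chronicle-test-id-provider:8090/jwks \
--userinfo-address http://chronicle-test-id-provider:8090/jwks \
```

Note that the userinfo address falls back to the same `/jwks` URL, because `chronicle.userinfo-url` reuses `chronicle.id-provider.service.url` in that branch.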

@@ -1,5 +1,4 @@
 {{- if .Values.test.enabled }}
-{{- if .Values.auth.required }}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role

@@ -34,4 +33,3 @@ subjects:
     name: {{ include "lib.serviceAccountName" . }}
     namespace: {{ .Release.Namespace }}
 {{- end }}
-{{- end }}

@@ -15,29 +15,51 @@ spec:
       restartPolicy: Never
       serviceAccountName: {{ include "lib.serviceAccountName" . }}
       automountServiceAccountToken: true
-      initContainers:
       {{- if .Values.auth.required }}
+      {{ if not .Values.devIdProvider.enabled }}
+      {{ required "If 'auth.required' when using the test suite 'devIdProvider.enabled' must be set to 'true'!" .Values.devIdProvider.enabled }}
+      {{ end }}
+      initContainers:
       - name: wait-for-id-provider
         {{- include "lib.image" (dict "imageRoot" .Values.test.api.image "global" .Values.global ) | nindent 10 }}
-        command: [ "sh", "-ec" ]
+        command: [ "sh", "-c" ]
         args:
           - |
-            URL={{ include "chronicle.jwksUrl" . }}
-            scheme=$(echo $URL | cut -f 1 -d :)
-            hostAndPort=$(echo $URL | cut -f 3 -d /)
-            HOST=$(echo $hostAndPort | cut -f 1 -d :)
-            port=$(echo $hostAndPort | awk -F: '{print $2}')
-            if [ "$scheme" = "http" ]; then
-              defaultPort=80
-            elif [ "$scheme" = "https" ]; then
-              defaultPort=443
-            else
-              defaultPort=80
-            fi
-            PORT=${port:-$defaultPort}
-            echo "Waiting for id-provider to be ready ..."
-            wait-for-it $HOST:$PORT --timeout=0
-            echo "Id-provider is ready. Exiting."
+            URL="{{ include "chronicle.id-provider.service.url" . }}"
+            wait_for_url() {
+              local url=$1
+              scheme=$(echo "$url" | cut -f 1 -d :)
+              hostAndPort=$(echo "$url" | cut -f 3 -d /)
+              HOST=$(echo "$hostAndPort" | cut -f 1 -d :)
+              port=$(echo "$hostAndPort" | awk -F: '{print $2}')
+              case $scheme in
+                "http")
+                  defaultPort=80
+                  ;;
+                "https")
+                  defaultPort=443
+                  ;;
+                *)
+                  defaultPort=80
+                  ;;
+              esac
+              PORT=${port:-$defaultPort}
+              wait-for-it "$HOST:$PORT" --timeout=120
+            }
+            echo "Waiting for id-provider to be ready ..."
+            wait_for_url "$URL"
+            if [ $? -eq 0 ]; then
+              echo "Id-provider is ready. Exiting."
+              exit 0
+            else
+              echo "Timeout occurred. Please check if the correct JWKS URL has been provided."
+              exit 1
+            fi
       - name: token-loader
         image: alpine/k8s:1.24.13
         command: [ "sh", "-ec" ]

@@ -65,13 +87,12 @@ spec:
             echo "Getting IP address for API ..."
             getent hosts $API | cut -f 1 -d \ | head -n 1 > /shared-data/api-ip || exit 1
-            {{- if .Values.auth.required }}
             if [ -f "/shared-data/jwks-token" ]; then
               echo "Found token."
               sleep 5
               export TOKEN=$(cat "/shared-data/jwks-token")
             fi
-            {{- end }}
             export HOST=$(cat /shared-data/api-ip)
             echo "Testing API with subscribe-submit-test..."
             subscribe-submit-test
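The `cut`/`awk` pipeline inside the new `wait_for_url` does the URL surgery; run against the dev id-provider URL it behaves like this (a standalone POSIX-shell sketch):

```
url="http://chronicle-test-id-provider:8090/jwks"
scheme=$(echo "$url" | cut -f 1 -d :)               # http
hostAndPort=$(echo "$url" | cut -f 3 -d /)          # chronicle-test-id-provider:8090
HOST=$(echo "$hostAndPort" | cut -f 1 -d :)         # chronicle-test-id-provider
port=$(echo "$hostAndPort" | awk -F: '{print $2}')  # 8090
echo "$scheme $HOST ${port:-80}"                    # http chronicle-test-id-provider 8090
```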

@@ -12,12 +12,12 @@ global:
 affinity: {}
 auth:
-  ## @md | `auth.required` | if true require authentication | false |
+  ## @md | `auth.required` | if true require authentication, rejecting 'anonymous' requests | false |
   required: false
   id:
-    claims: email
+    ## @md | `auth.id.claims` | Chronicle provides default values ["iss", "sub"] | nil |
+    claims:
   jwks:
-    enabled: true
     url:
   userinfo:
     url:

@@ -89,7 +89,7 @@ logLevel: info
 opa:
   ## @md | `opa.enabled` | if true set up a full OPA enabled setup | true |
-  enabled: true
+  enabled: false
 opaInit:
   ## @md | `opa.init.image` | the image to use for the chronicle-init container | blockchaintp/chronicle-opa-init |
   image:

@@ -131,8 +131,7 @@ serviceAccount:
   name:
 test:
-  ## @md | `test.enabled` | true to enable test Jobs and Services | true |
-  enabled: true
+  ## @md | `test.api` | test the chronicle GraphQL server API |
   api:
     ## @md | `api-test-container.image` | the image to use for the api-test container | blockchaintp/chronicle-api-test |
     image:

@@ -142,6 +141,8 @@ test:
       repository: blockchaintp/chronicle-helm-api-test-amd64
       ## @md | `test.api.image.tag` | the image tag | latest |
       tag: BTP2.1.0-0.7.3
+  ## @md | `test.enabled` | true to enable test Jobs and Services | true |
+  enabled: true
 postgres:
   # if enabled we allocate a postgres database here

@@ -1,15 +1,18 @@
 annotations:
   catalog.cattle.io/certified: partner
-  catalog.cattle.io/display-name: Kamaji - Managed Kubernetes Service
+  catalog.cattle.io/display-name: Kamaji
   catalog.cattle.io/kube-version: '>=1.21.0-0'
   catalog.cattle.io/release-name: kamaji
 apiVersion: v2
 appVersion: v0.3.0
-description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
-  with a fraction of the operational burden. With Kamaji, you can deploy and operate
-  hundreds of Kubernetes clusters as a hyper-scaler.
+description: Kamaji deploys and operates Kubernetes at scale with a fraction of the
+  operational burden. Kamaji turns any Kubernetes cluster into an “admin cluster”
+  to orchestrate other Kubernetes clusters called “tenant clusters”. Kamaji is special
+  because the Control Plane components are running in a single pod instead of dedicated
+  machines. This solution makes running multiple Control Planes cheaper and easier
+  to deploy and operate.
 home: https://github.com/clastix/kamaji
-icon: https://github.com/clastix/kamaji/raw/master/assets/kamaji-logo.png
+icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
 kubeVersion: '>=1.21.0-0'
 maintainers:
 - email: dario@tranchitella.eu

@@ -22,4 +25,4 @@ name: kamaji
 sources:
 - https://github.com/clastix/kamaji
 type: application
-version: 0.12.0
+version: 0.12.1

@@ -1,8 +1,8 @@
 # kamaji

-![Version: 0.12.0](https://img.shields.io/badge/Version-0.12.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.3.0](https://img.shields.io/badge/AppVersion-v0.3.0-informational?style=flat-square)
+![Version: 0.12.1](https://img.shields.io/badge/Version-0.12.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.3.0](https://img.shields.io/badge/AppVersion-v0.3.0-informational?style=flat-square)

-Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
+Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden. Kamaji turns any Kubernetes cluster into an “admin cluster” to orchestrate other Kubernetes clusters called “tenant clusters”. Kamaji is special because the Control Plane components are running in a single pod instead of dedicated machines. This solution makes running multiple Control Planes cheaper and easier to deploy and operate.

 ## Maintainers

@@ -1,30 +1,12 @@
-# Kamaji - Managed Kubernetes Service
+# Kamaji

-Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden.
+Kamaji deploys and operates Kubernetes at scale with a fraction of the operational burden.

 Useful links:
 - [Kamaji Github repository](https://github.com/clastix/kamaji)
-- [Kamaji Documentation](https://github.com/clastix/kamaji/docs/)
+- [Kamaji Documentation](https://kamaji.clastix.io)

 ## Requirements

 * Kubernetes v1.22+
 * Helm v3

-# Installation
-
-To install the Chart with the release name `kamaji`:
-
-        helm upgrade --install --namespace kamaji-system --create-namespace clastix/kamaji
-
-Show the status:
-
-        helm status kamaji -n kamaji-system
-
-Upgrade the Chart
-
-        helm upgrade kamaji -n kamaji-system clastix/kamaji
-
-Uninstall the Chart
-
-        helm uninstall kamaji -n kamaji-system

@@ -4,7 +4,7 @@ annotations:
   catalog.cattle.io/kube-version: '>=1.8-0'
   catalog.cattle.io/release-name: cockroachdb
 apiVersion: v1
-appVersion: 23.1.2
+appVersion: 23.1.3
 description: CockroachDB is a scalable, survivable, strongly-consistent SQL database.
 home: https://www.cockroachlabs.com
 icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png

@@ -14,4 +14,4 @@ maintainers:
 name: cockroachdb
 sources:
 - https://github.com/cockroachdb/cockroach
-version: 11.0.1
+version: 11.0.2

@@ -229,10 +229,10 @@ kubectl get pods \
 ```
 ```
-my-release-cockroachdb-0    cockroachdb/cockroach:v23.1.2
-my-release-cockroachdb-1    cockroachdb/cockroach:v23.1.2
-my-release-cockroachdb-2    cockroachdb/cockroach:v23.1.2
-my-release-cockroachdb-3    cockroachdb/cockroach:v23.1.2
+my-release-cockroachdb-0    cockroachdb/cockroach:v23.1.3
+my-release-cockroachdb-1    cockroachdb/cockroach:v23.1.3
+my-release-cockroachdb-2    cockroachdb/cockroach:v23.1.3
+my-release-cockroachdb-3    cockroachdb/cockroach:v23.1.3
 ```
 Resume normal operations. Once you are comfortable that the stability and performance of the cluster is what you'd expect post-upgrade, finalize the upgrade:

@@ -285,9 +285,9 @@ Verify that no pod is deleted and then upgrade as normal. A new StatefulSet will
 ### See also

-For more information about upgrading a cluster to the latest major release of CockroachDB, see [Upgrade to CockroachDB v21.1](https://www.cockroachlabs.com/docs/stable/upgrade-cockroach-version.html).
+For more information about upgrading a cluster to the latest major release of CockroachDB, see [Upgrade to CockroachDB](https://www.cockroachlabs.com/docs/stable/upgrade-cockroach-version.html).

-Note that there are some backward-incompatible changes to SQL features between versions 20.2 and 21.1. For details, see the [CockroachDB v23.1.2 release notes](https://www.cockroachlabs.com/docs/releases/v23.1.2.html#backward-incompatible-changes).
+Note that there are sometimes backward-incompatible changes to SQL features between major CockroachDB releases. For details, see the [Upgrade Policy](https://www.cockroachlabs.com/docs/cockroachcloud/upgrade-policy).

 ## Configuration

@@ -316,7 +316,7 @@ For details see the [`values.yaml`](values.yaml) file.
 | `conf.store.size` | CockroachDB storage size | `""` |
 | `conf.store.attrs` | CockroachDB storage attributes | `""` |
 | `image.repository` | Container image name | `cockroachdb/cockroach` |
-| `image.tag` | Container image tag | `v23.1.2` |
+| `image.tag` | Container image tag | `v23.1.3` |
 | `image.pullPolicy` | Container pull policy | `IfNotPresent` |
 | `image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` |
 | `statefulset.replicas` | StatefulSet replicas number | `3` |

@@ -122,16 +122,17 @@ spec:
       --cluster-name={{.}} \
       {{- end }}
       --host={{ template "cockroachdb.fullname" . }}-0.{{ template "cockroachdb.fullname" . -}}
-      :{{ .Values.service.ports.grpc.internal.port | int64 }}
+      :{{ .Values.service.ports.grpc.internal.port | int64 }} \
       2>&1);
     local exitCode="$?";
     echo $output;
-    if [[ "$exitCode" == "0" || "$output" == *"cluster has already been initialized"* ]]
-    then break;
+    if [[ "$output" =~ .*"Cluster successfully initialized".* || "$output" =~ .*"cluster has already been initialized".* ]]; then
+      break;
     fi
+    echo "Cluster is not ready to be initialized, retrying in 5 seconds"
     sleep 5;
   done
 }
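The revised check keys off `cockroach init`'s output instead of the exit code alone. Outside the template, the retry pattern reduces to something like this (a simplified sketch; the host and flags are illustrative, not taken from this chart):

```
while true; do
  # Capture both stdout and stderr from the init attempt.
  output=$(cockroach init --insecure --host=my-release-cockroachdb-0.my-release-cockroachdb:26257 2>&1)
  echo "$output"
  # Success and "already initialized" both end the loop.
  if [[ "$output" =~ "Cluster successfully initialized" || "$output" =~ "cluster has already been initialized" ]]; then
    break
  fi
  echo "Cluster is not ready to be initialized, retrying in 5 seconds"
  sleep 5
done
```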

@@ -12,14 +12,14 @@ metadata:
     app.kubernetes.io/instance: {{ .Release.Name | quote }}
     app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
     {{- if $serviceMonitor.labels }}
-    {{ toYaml $serviceMonitor.labels | nindent 4 }}
+    {{- toYaml $serviceMonitor.labels | nindent 4 }}
     {{- end }}
     {{- with .Values.labels }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
   {{- if $serviceMonitor.annotations }}
   annotations:
-    {{ toYaml $serviceMonitor.annotations | nindent 4 }}
+    {{- toYaml $serviceMonitor.annotations | nindent 4 }}
   {{- end }}
 spec:
   selector:

@@ -1,7 +1,7 @@
 # Generated file, DO NOT EDIT. Source: build/templates/values.yaml
 image:
   repository: cockroachdb/cockroach
-  tag: v23.1.2
+  tag: v23.1.3
   pullPolicy: IfNotPresent
   credentials: {}
   # registry: docker.io

@@ -518,7 +518,7 @@ tls:
     # Image Placeholder for the selfSigner utility. This will be changed once the CI workflows for the image is in place.
     image:
       repository: cockroachlabs-helm-charts/cockroach-self-signer-cert
-      tag: "1.3"
+      tag: "1.4"
       pullPolicy: IfNotPresent
       credentials: {}
       registry: gcr.io

@@ -1,7 +1,12 @@
 # Datadog changelog

+## 3.32.2
+
+* Set the `priority` field of the OpenShifts SCC to `null` in order to not have a higher priority than the OpenShift 4.11+ default `restricted-v2` SCC.
+
 ## 3.32.1

+* Add AP1 Site Comment at `value.yaml`.
 * Fix CVE in the FIPS compliant side car container

 ## 3.32.0

@@ -19,4 +19,4 @@ name: datadog
 sources:
 - https://app.datadoghq.com/account/settings#agent/kubernetes
 - https://github.com/DataDog/datadog-agent
-version: 3.32.1
+version: 3.32.2

@@ -1,6 +1,6 @@
 # Datadog

-![Version: 3.32.1](https://img.shields.io/badge/Version-3.32.1-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
+![Version: 3.32.2](https://img.shields.io/badge/Version-3.32.2-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)

 [Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/).

@@ -7,7 +7,7 @@ metadata:
     {{ include "datadog.labels" . | indent 4 }}
 users:
 - system:serviceaccount:{{ .Release.Namespace }}:{{ include "agents.serviceAccountName" . }}
-priority: 8
+priority: null
 # Allow host ports for dsd / trace intake
 allowHostPorts: {{ or .Values.datadog.dogstatsd.useHostPort .Values.datadog.apm.enabled .Values.datadog.apm.portEnabled .Values.agents.useHostNetwork }}
 # Allow host PID for dogstatsd origin detection

@@ -7,7 +7,7 @@ metadata:
     {{ include "datadog.labels" . | indent 4 }}
 users:
 - system:serviceaccount:{{ .Release.Namespace }}:{{ template "datadog.fullname" . }}-cluster-agent
-priority: 8
+priority: null
 # Allow host ports if hostNetwork
 allowHostPorts: {{ .Values.clusterAgent.useHostNetwork }}
 allowHostNetwork: {{ .Values.clusterAgent.useHostNetwork}}
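On an OpenShift 4.11+ cluster, the effect of nulling `priority` in both SCCs can be checked after the upgrade (a sketch; `scc` is an OpenShift-only resource, so this fails on plain Kubernetes):

```
kubectl get scc -o custom-columns=NAME:.metadata.name,PRIORITY:.priority
# The Datadog SCCs should now show <none> rather than 8, so they no longer
# outrank the default restricted-v2 SCC during admission.
```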

@@ -110,6 +110,7 @@ datadog:
   ## Set to 'us3.datadoghq.com' to send data to the US3 site.
   ## Set to 'us5.datadoghq.com' to send data to the US5 site.
   ## Set to 'ddog-gov.com' to send data to the US1-FED site.
+  ## Set to 'ap1.datadoghq.com' to send data to the AP1 site.
   site: # datadoghq.com

   # datadog.dd_url -- The host of the Datadog intake server to send Agent data to, only set this option if you need the Agent to send data to a custom URL

@@ -9,7 +9,7 @@ annotations:
   catalog.cattle.io/kube-version: '>=1.21-0'
   catalog.cattle.io/release-name: instana-agent
 apiVersion: v2
-appVersion: 1.249.0
+appVersion: 1.251.0
 description: Instana Agent for Kubernetes
 home: https://www.instana.com/
 icon: https://agents.instana.io/helm/stan-logo-2020.png

@@ -23,4 +23,4 @@ maintainers:
 name: instana-agent
 sources:
 - https://github.com/instana/instana-agent-docker
-version: 1.2.59
+version: 1.2.60

@@ -123,7 +123,7 @@ The following table lists the configurable parameters of the Instana chart and t
 | `leaderElector.image.name` | The elector image name to pull | `instana/leader-elector` |
 | `leaderElector.image.digest` | The image digest to pull; if specified, it causes `leaderElector.image.tag` to be ignored | `nil` |
 | `leaderElector.image.tag` | The image tag to pull; this property is ignored if `leaderElector.image.digest` is specified | `latest` |
-| `k8s_sensor.deployment.enabled` | Isolate k8sensor with a deployment | `false` |
+| `k8s_sensor.deployment.enabled` | Isolate k8sensor with a deployment | `true` |
 | `k8s_sensor.image.name` | The k8sensor image name to pull | `gcr.io/instana/k8sensor` |
 | `k8s_sensor.image.digest` | The image digest to pull; if specified, it causes `k8s_sensor.image.tag` to be ignored | `nil` |
 | `k8s_sensor.image.tag` | The image tag to pull; this property is ignored if `k8s_sensor.image.digest` is specified | `latest` |

@@ -333,6 +333,9 @@ zones:
 ## Changelog

+### 1.2.60
+
+* Enable the k8s_sensor by default
+
 ### 1.2.59

 * Introduce unique selectorLabels and commonLabels for k8s-sensor deployment

@@ -240,9 +240,9 @@ k8s_sensor:
     pullPolicy: Always
   deployment:
     # Specifies whether or not to enable the Deployment and turn off the Kubernetes sensor in the DaemonSet
-    enabled: false
-    # Use a single replica, the impact will generally be low and we need to address a host of other concerns where clusters are large.
-    replicas: 1
+    enabled: true
+    # Use three replicas to ensure the HA by the default.
+    replicas: 3
     # k8s_sensor.deployment.pod adjusts the resource assignments for the agent independently of the DaemonSet agent when k8s_sensor.deployment.enabled=true
     pod:
       requests:
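With the default flipped, clusters that prefer the previous DaemonSet-embedded Kubernetes sensor can turn the Deployment back off; a values sketch using the keys shown above:

```
k8s_sensor:
  deployment:
    # Revert to the pre-1.2.60 behavior: keep the Kubernetes sensor in the DaemonSet.
    enabled: false
```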

@@ -10,8 +10,6 @@ description: A Helm chart for installing Percona XtraDB Cluster Databases using
 home: https://www.percona.com/doc/kubernetes-operator-for-pxc/kubernetes.html
 icon: https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/main/operator.png
 maintainers:
-- email: ivan.pylypenko@percona.com
-  name: cap1984
 - email: tomislav.plavcic@percona.com
   name: tplavcic
 - email: sergey.pronin@percona.com

@@ -19,4 +17,4 @@ maintainers:
 - email: natalia.marukovich@percona.com
   name: nmarukovich
 name: pxc-db
-version: 1.12.2
+version: 1.12.3

@@ -1,599 +0,0 @@
# example production ready values for pxc-cluster.
# (you may still need to tune this for your own needs)
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
finalizers:
- delete-pxc-pods-in-order
# Can delete proxysql PVCs, they're recreatable.
- delete-proxysql-pvc
## Don't delete database PVCs.
# - delete-pxc-pvc
## Set this if you want to delete cert manager certificates on cluster deletion
# - delete-ssl
nameOverride: "production"
fullnameOverride: "production"
operatorImageRepository: percona/percona-xtradb-cluster-operator
crVersion: 1.12.0
ignoreAnnotations: []
# - iam.amazonaws.com/role
ignoreLabels: []
# - rack
pause: false
initImage: ""
allowUnsafeConfigurations: false
updateStrategy: SmartUpdate
upgradeOptions:
versionServiceEndpoint: https://check.percona.com
apply: disabled
schedule: "0 4 * * *"
enableCRValidationWebhook: false
tls: {}
# SANs:
# - pxc-1.example.com
# - pxc-2.example.com
# - pxc-3.example.com
# issuerConf:
# name: special-selfsigned-issuer
# kind: ClusterIssuer
# group: cert-manager.io
pxc:
size: 3
image:
repository: percona/percona-xtradb-cluster
tag: 8.0.29-21.1
# imagePullPolicy: Always
autoRecovery: true
# expose:
# enabled: true
# type: LoadBalancer
# trafficPolicy: Local
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# networking.gke.io/load-balancer-type: "Internal"
# replicationChannels:
# - name: pxc1_to_pxc2
# isSource: true
# - name: pxc2_to_pxc1
# isSource: false
# configuration:
# sourceRetryCount: 3
# sourceConnectRetry: 60
# ssl: false
# sslSkipVerify: true
# ca: '/etc/mysql/ssl/ca.crt'
# sourcesList:
# - host: 10.95.251.101
# port: 3306
# weight: 100
# schedulerName: mycustom-scheduler
imagePullSecrets: []
# - name: private-registry-credentials
annotations: {}
# iam.amazonaws.com/role: role-arn
labels: {}
# rack: rack-22
# priorityClassName: high-priority
readinessDelaySec: 15
livenessDelaySec: 300
## Uncomment to pass in a mysql config file
# configuration: |
# [mysqld]
# wsrep_debug=ON
# wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
# envVarsSecret: my-env-var-secrets
resources:
# Set these to the miminum you'd expect your database to need.
requests:
memory: 1G
cpu: 600m
# db resources are sacred, so don't limit it.
limits: {}
# runtimeClassName: image-rc
sidecars: []
sidecarVolumes: []
sidecarPVCs: []
sidecarResources:
requests: {}
limits: {}
nodeSelector: {}
# disktype: ssd
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
tolerations: []
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
gracePeriod: 600
podDisruptionBudget:
# With only 3 nodes, don't let Kubernetes disrupt more than one at a time.
maxUnavailable: 1
# minAvailable: 0
persistence:
enabled: true
## set storage class if you need something fancy for your cloud.
# storageClass: "-"
accessMode: ReadWriteOnce
## Size this according to your expected data size. Resizing a PVC isn't easy
## So don't be tight fisted.
size: 8Gi
## Don't disable TLS you monster
disableTLS: false
## You should use certManager ... if you don't, you should create the certificates
## Don't let helm do it for you for prod.
certManager: true
## You should absolutely provide a pre-create secret here, don't rely on Helm to
## Pass in passwords etc.
# clusterSecretName:
readinessProbes:
initialDelaySeconds: 15
timeoutSeconds: 15
periodSeconds: 30
successThreshold: 1
failureThreshold: 5
livenessProbes:
initialDelaySeconds: 300
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
# A custom Kubernetes Security Context for a Container to be used instead of the default one
# containerSecurityContext:
# privileged: false
# A custom Kubernetes Security Context for a Pod to be used instead of the default one
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups:
# - 1001
# serviceAccountName: percona-xtradb-cluster-operator-workload
haproxy:
enabled: true
size: 3
image: ""
# imagePullPolicy: Always
imagePullSecrets: []
# - name: private-registry-credentials
# configuration: |
#
# the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
#
# global
# maxconn 2048
# external-check
# insecure-fork-wanted
# stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
#
# defaults
# default-server init-addr last,libc,none
# log global
# mode tcp
# retries 10
# timeout client 28800s
# timeout connect 100500
# timeout server 28800s
#
# frontend galera-in
# bind *:3309 accept-proxy
# bind *:3306
# mode tcp
# option clitcpka
# default_backend galera-nodes
#
# frontend galera-admin-in
# bind *:33062
# mode tcp
# option clitcpka
# default_backend galera-admin-nodes
#
# frontend galera-replica-in
# bind *:3307
# mode tcp
# option clitcpka
# default_backend galera-replica-nodes
#
# frontend galera-mysqlx-in
# bind *:33060
# mode tcp
# option clitcpka
# default_backend galera-mysqlx-nodes
#
# frontend stats
# bind *:8404
# mode http
# option http-use-htx
# http-request use-service prometheus-exporter if { path /metrics }
annotations: {}
# iam.amazonaws.com/role: role-arn
labels: {}
# rack: rack-22
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# runtimeClassName: image-rc
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
replicasServiceEnabled: true
# replicasLoadBalancerSourceRanges:
# - 10.0.0.0/8
# replicasLoadBalancerIP: 127.0.0.1
# replicasServiceType: ClusterIP
# replicasExternalTrafficPolicy: Cluster
# replicasServiceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# replicasServiceLabels:
# rack: rack-23
# priorityClassName: high-priority
# schedulerName: mycustom-scheduler
readinessDelaySec: 15
livenessDelaySec: 300
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 1G
cpu: 600m
limits: {}
# memory: 1G
# cpu: 600m
sidecars: []
sidecarVolumes: []
sidecarPVCs: []
sidecarResources:
requests: {}
limits: {}
nodeSelector: {}
# disktype: ssd
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
tolerations: []
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
gracePeriod: 30
# only one of `maxUnavailable` or `minAvailable` can be set.
podDisruptionBudget:
maxUnavailable: 1
# minAvailable: 0
readinessProbes:
initialDelaySeconds: 15
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
livenessProbes:
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 4
# A custom Kubernetes Security Context for a Container to be used instead of the default one
# containerSecurityContext:
# privileged: false
# A custom Kubernetes Security Context for a Pod to be used instead of the default one
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups:
# - 1001
proxysql:
enabled: false
size: 3
image: ""
# imagePullPolicy: Always
imagePullSecrets: []
# configuration: |
# datadir="/var/lib/proxysql"
#
# admin_variables =
# {
# admin_credentials="proxyadmin:admin_password"
# mysql_ifaces="0.0.0.0:6032"
# refresh_interval=2000
#
# cluster_username="proxyadmin"
# cluster_password="admin_password"
# checksum_admin_variables=false
# checksum_ldap_variables=false
# checksum_mysql_variables=false
# cluster_check_interval_ms=200
# cluster_check_status_frequency=100
# cluster_mysql_query_rules_save_to_disk=true
# cluster_mysql_servers_save_to_disk=true
# cluster_mysql_users_save_to_disk=true
# cluster_proxysql_servers_save_to_disk=true
# cluster_mysql_query_rules_diffs_before_sync=1
# cluster_mysql_servers_diffs_before_sync=1
# cluster_mysql_users_diffs_before_sync=1
# cluster_proxysql_servers_diffs_before_sync=1
# }
#
# mysql_variables=
# {
# monitor_password="monitor"
# monitor_galera_healthcheck_interval=1000
# threads=2
# max_connections=2048
# default_query_delay=0
# default_query_timeout=10000
# poll_timeout=2000
# interfaces="0.0.0.0:3306"
# default_schema="information_schema"
# stacksize=1048576
# connect_timeout_server=10000
# monitor_history=60000
# monitor_connect_interval=20000
# monitor_ping_interval=10000
# ping_timeout_server=200
# commands_stats=true
# sessions_sort=true
# have_ssl=true
# ssl_p2s_ca="/etc/proxysql/ssl-internal/ca.crt"
# ssl_p2s_cert="/etc/proxysql/ssl-internal/tls.crt"
# ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
# ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
# }
# - name: private-registry-credentials
annotations: {}
# iam.amazonaws.com/role: role-arn
labels: {}
# rack: rack-22
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# runtimeClassName: image-rc
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
# priorityClassName: high-priority
# schedulerName: mycustom-scheduler
readinessDelaySec: 15
livenessDelaySec: 300
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 1G
cpu: 600m
limits: {}
# memory: 1G
# cpu: 600m
sidecars: []
sidecarVolumes: []
sidecarPVCs: []
sidecarResources:
requests: {}
limits: {}
nodeSelector: {}
# disktype: ssd
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
tolerations: []
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
gracePeriod: 30
# only one of `maxUnavailable` or `minAvailable` can be set.
podDisruptionBudget:
maxUnavailable: 1
# minAvailable: 0
persistence:
enabled: true
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
# A custom Kubernetes Security Context for a Container to be used instead of the default one
# containerSecurityContext:
# privileged: false
# A custom Kubernetes Security Context for a Pod to be used instead of the default one
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups:
# - 1001
logcollector:
enabled: true
image: ""
# imagePullPolicy: Always
imagePullSecrets: []
# configuration: |
# [OUTPUT]
# Name es
# Match *
# Host 192.168.2.3
# Port 9200
# Index my_index
# Type my_type
resources:
requests:
memory: 100M
cpu: 200m
limits: {}
pmm:
enabled: false
image:
repository: percona/pmm-client
tag: 2.32.0
# imagePullPolicy: Always
imagePullSecrets: []
serverHost: monitoring-service
serverUser: admin
resources:
requests:
memory: 150M
cpu: 300m
limits: {}
backup:
enabled: true
image: ""
# backoffLimit: 6
# serviceAccountName: percona-xtradb-cluster-operator
# imagePullPolicy: Always
imagePullSecrets: []
# - name: private-registry-credentials
pitr:
enabled: false
storageName: s3-us-west-binlogs
timeBetweenUploads: 60
resources:
requests: {}
limits: {}
storages:
fs-pvc:
type: filesystem
volume:
persistentVolumeClaim:
# storageClassName: standard
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 6Gi
## Set up backups to your S3 bucket!!!
# s3-us-west:
# type: s3
# verifyTLS: true
# nodeSelector:
# storage: tape
# backupWorker: 'True'
# resources:
# requests:
# memory: 1G
# cpu: 600m
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: backupWorker
# operator: In
# values:
# - 'True'
# tolerations:
# - key: "backupWorker"
# operator: "Equal"
# value: "True"
# effect: "NoSchedule"
# annotations:
# testName: scheduled-backup
# labels:
# backupWorker: 'True'
# schedulerName: 'default-scheduler'
# priorityClassName: 'high-priority'
# containerSecurityContext:
# privileged: true
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups: [1001, 1002, 1003]
# s3:
# bucket: S3-BACKUP-BUCKET-NAME-HERE
# # Use credentialsSecret OR credentialsAccessKey/credentialsSecretKey
# credentialsSecret: my-cluster-name-backup-s3
# #credentialsAccessKey: REPLACE-WITH-AWS-ACCESS-KEY
# #credentialsSecretKey: REPLACE-WITH-AWS-SECRET-KEY
# region: us-west-2
# endpointUrl: https://sfo2.digitaloceanspaces.com
# s3-us-west-binlogs:
# type: s3
# s3:
# bucket: S3-BACKUP-BUCKET-NAME-HERE/DIRECTORY
# credentialsSecret: my-cluster-name-backup-s3
# region: us-west-2
# endpointUrl: https://sfo2.digitaloceanspaces.com
# azure-blob:
# type: azure
# azure:
# credentialsSecret: azure-secret
# container: test
# endpointUrl: https://accountName.blob.core.windows.net
# storageClass: Hot
schedule:
- name: "daily-backup"
schedule: "0 0 * * *"
keep: 5
storageName: fs-pvc
## Schedule s3 backups!!
# - name: "sat-night-backup"
# schedule: "0 0 * * 6"
# keep: 3
# storageName: s3-us-west
secrets:
## You should be overriding these with your own or specify name for clusterSecretName.
passwords:
root: insecure-root-password
xtrabackup: insecure-xtrabackup-password
monitor: insecure-monitor-password
clustercheck: insecure-clustercheck-password
proxyadmin: insecure-proxyadmin-password
pmmserver: insecure-pmmserver-password
# If pmmserverkey is set in that case pmmserver pass will not be included
# pmmserverkey: set-pmmserver-api-key
operator: insecure-operator-password
replication: insecure-replication-password
## If you are using `cert-manager` you can skip this next section.
## If not using cert-manager, you should set these!!!!
tls: {}
# This should be the name of a secret that contains certificates.
# it should have the following keys: `ca.crt`, `tls.crt`, `tls.key`
# If not set the Helm chart will attempt to create certificates
# for you [not recommended for prod]:
# cluster:
# This should be the name of a secret that contains certificates.
# it should have the following keys: `ca.crt`, `tls.crt`, `tls.key`
# If not set the Helm chart will attempt to create certificates
# for you [not recommended for prod]:
# internal:
# logCollector: cluster1-log-collector-secrets
# vault: keyring-secret-vault

@@ -1,8 +1,12 @@
-{{- if not (hasKey .Values.pxc "clusterSecretName") }}
+{{- if hasKey .Values.secrets "passwords" }}
 apiVersion: v1
 kind: Secret
 metadata:
-  name: {{ include "pxc-database.fullname" . }}
+  {{- if hasKey .Values.pxc "clusterSecretName" }}
+  name: {{ .Values.pxc.clusterSecretName }}
+  {{- else }}
+  name: {{ include "pxc-database.fullname" . }}-secrets
+  {{- end }}
   labels:
 {{ include "pxc-database.labels" . | indent 4 }}
 type: Opaque

View File

@@ -19,7 +19,7 @@ spec:
   {{- if hasKey .Values.pxc "clusterSecretName" }}
   secretsName: {{ .Values.pxc.clusterSecretName }}
   {{- else }}
-  secretsName: {{ include "pxc-database.fullname" . }}
+  secretsName: {{ include "pxc-database.fullname" . }}-secrets
   {{- end }}
   {{- if not .Values.pxc.disableTLS }}
   {{- if hasKey .Values.secrets.tls "cluster" }}

View File

@@ -5,9 +5,9 @@
 finalizers:
   - delete-pxc-pods-in-order
 ## Set this if you want to delete proxysql persistent volumes on cluster deletion
-  - delete-proxysql-pvc
+  # - delete-proxysql-pvc
 ## Set this if you want to delete database persistent volumes on cluster deletion
-  - delete-pxc-pvc
+  # - delete-pxc-pvc
 ## Set this if you want to delete cert manager certificates on cluster deletion
 # - delete-ssl
@@ -125,10 +125,7 @@ pxc:
     maxUnavailable: 1
     # minAvailable: 0
   persistence:
-    enabled: false
-    # if persistence is enabled, you can specify a hostPath (not recommended)
-    # hostPath: /data/mysql
-    # otherwise you can specify values for a storage claim (default)
+    enabled: true
     ## percona data Persistent Volume Storage Class
     ## If defined, storageClassName: <storageClass>
     ## If set to "-", storageClassName: "", which disables dynamic provisioning
@@ -149,7 +146,7 @@ pxc:
   # If this is set will not create secrets from values and will instead try to use
   # a pre-existing secret of the same name.
-  # clusterSecretName:
+  # clusterSecretName: cluster1-secrets
   readinessProbes:
     initialDelaySeconds: 15
     timeoutSeconds: 15
@@ -433,10 +430,7 @@ proxysql:
     # minAvailable: 0
   persistence:
     enabled: true
-    # if persistence is enabled, you can specify a hostPath (not recommended)
-    # hostPath: /data/mysql
-    # otherwise you can specify values for a storage claim (default)
-    ## percona data Persistent Volume Storage Class
+    ## Percona data Persistent Volume Storage Class
     ## If defined, storageClassName: <storageClass>
     ## If set to "-", storageClassName: "", which disables dynamic provisioning
     ## If undefined (the default) or set to null, no storageClassName spec is
@@ -492,6 +486,7 @@ pmm:
 backup:
   enabled: true
   image: ""
+  # backoffLimit: 6
   # serviceAccountName: percona-xtradb-cluster-operator
   # imagePullPolicy: Always
   imagePullSecrets: []
@@ -583,17 +578,17 @@ backup:
 secrets:
   ## You should override these with your own or specify a name via clusterSecretName.
-  passwords:
-    root: insecure-root-password
-    xtrabackup: insecure-xtrabackup-password
-    monitor: insecure-monitor-password
-    clustercheck: insecure-clustercheck-password
-    proxyadmin: insecure-proxyadmin-password
-    pmmserver: insecure-pmmserver-password
-    # If pmmserverkey is set, the pmmserver password will not be included
-    # pmmserverkey: set-pmmserver-api-key
-    operator: insecure-operator-password
-    replication: insecure-replication-password
+  # passwords:
+  #   root: insecure-root-password
+  #   xtrabackup: insecure-xtrabackup-password
+  #   monitor: insecure-monitor-password
+  #   clustercheck: insecure-clustercheck-password
+  #   proxyadmin: insecure-proxyadmin-password
+  #   pmmserver: insecure-pmmserver-password
+  #   # If pmmserverkey is set, the pmmserver password will not be included
+  #   # pmmserverkey: set-pmmserver-api-key
+  #   operator: insecure-operator-password
+  #   replication: insecure-replication-password
   ## If you are using `cert-manager`, you can skip this next section.
   tls: {}
     # This should be the name of a secret that contains certificates.
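
With the default passwords now commented out, the chart expects a pre-existing secret named via `pxc.clusterSecretName`. A minimal sketch, assuming the Percona chart repository is added as `percona`; the secret name, release name, and password values are placeholders, and the keys mirror the commented-out defaults above:

```bash
# Placeholder secret name and passwords; keys match the chart's expectations.
kubectl create secret generic cluster1-secrets \
  --from-literal=root=REPLACE-ME \
  --from-literal=xtrabackup=REPLACE-ME \
  --from-literal=monitor=REPLACE-ME \
  --from-literal=clustercheck=REPLACE-ME \
  --from-literal=proxyadmin=REPLACE-ME \
  --from-literal=operator=REPLACE-ME \
  --from-literal=replication=REPLACE-ME

helm install cluster1 percona/pxc-db --set pxc.clusterSecretName=cluster1-secrets
```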

View File

@@ -11,8 +11,6 @@ description: A Helm chart for deploying the Percona Operator for MySQL (based on
 home: https://docs.percona.com/percona-operator-for-mysql/pxc/
 icon: https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/main/operator.png
 maintainers:
-- email: ivan.pylypenko@percona.com
-  name: cap1984
 - email: tomislav.plavcic@percona.com
   name: tplavcic
 - email: natalia.marukovich@percona.com
@@ -20,4 +18,4 @@ maintainers:
 - email: sergey.pronin@percona.com
   name: spron-in
 name: pxc-operator
-version: 1.12.1
+version: 1.12.2

View File

@@ -3,4 +3,6 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: {{ .Values.watchNamespace }}
+  annotations:
+    helm.sh/resource-policy: keep
 {{ end }}
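
With the `helm.sh/resource-policy: keep` annotation, Helm no longer deletes the watched namespace on uninstall; if you actually want it gone, remove it by hand. A sketch with placeholder names:

```bash
helm uninstall pxc-operator
kubectl delete namespace pxc   # assumes watchNamespace was set to "pxc"
```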

View File

@@ -1,6 +1,6 @@
 dependencies:
 - name: console
   repository: https://charts.redpanda.com
-  version: 0.6.5
-digest: sha256:533659a56594d97d5583ad1d9796089d5bd8240db8037ab010dc9c959f16cce9
-generated: "2023-06-01T20:32:16.065074963Z"
+  version: 0.6.6
+digest: sha256:af20a82c5cb646895892b783bdcfc50ca41f3f67ec14606c40236969c6a166e4
+generated: "2023-06-13T13:11:04.974481059Z"

View File

@@ -15,7 +15,7 @@ annotations:
   catalog.cattle.io/kube-version: '>=1.21-0'
   catalog.cattle.io/release-name: redpanda
 apiVersion: v2
-appVersion: v23.1.11
+appVersion: v23.1.12
 dependencies:
 - condition: console.enabled
   name: console
@@ -31,4 +31,4 @@ name: redpanda
 sources:
 - https://github.com/redpanda-data/helm-charts
 type: application
-version: 4.0.32
+version: 4.0.33

View File

@@ -19,4 +19,4 @@ name: console
 sources:
 - https://github.com/redpanda-data/helm-charts
 type: application
-version: 0.6.5
+version: 0.6.6

View File

@@ -38,7 +38,9 @@ spec:
   {{- end }}
   rules:
     {{- range .Values.ingress.hosts }}
-    - host: {{ tpl (.host) $ | quote }}
+    - {{- if .host }}
+      host: {{ tpl (.host) $ | quote }}
+      {{- end}}
       http:
         paths:
           {{- range .paths }}
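
With this change an entry in `ingress.hosts` may omit `host`, yielding a catch-all rule. A sketch of exercising it, assuming the repository alias `redpanda` (https://charts.redpanda.com) and the scaffold-style `path`/`pathType` fields:

```bash
helm template console redpanda/console \
  --set ingress.enabled=true \
  --set 'ingress.hosts[0].paths[0].path=/' \
  --set 'ingress.hosts[0].paths[0].pathType=Prefix'
```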

View File

@@ -4,7 +4,7 @@ annotations:
   catalog.cattle.io/kube-version: '>= 1.17.0-0'
   catalog.cattle.io/release-name: speedscale-operator
 apiVersion: v1
-appVersion: 1.3.94
+appVersion: 1.3.99
 description: Stress test your APIs with real world scenarios. Collect and replay
   traffic without scripting.
 home: https://speedscale.com
@@ -24,4 +24,4 @@ maintainers:
 - email: support@speedscale.com
   name: Speedscale Support
 name: speedscale-operator
-version: 1.3.12
+version: 1.3.13

View File

@@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
 A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
 incompatible breaking change needing manual actions.
-### Upgrade to 1.3.12
+### Upgrade to 1.3.13
 ```bash
-kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.12/templates/crds/trafficreplays.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.13/templates/crds/trafficreplays.yaml
 ```
 ### Upgrade to 1.1.0

View File

@@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
 A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
 incompatible breaking change needing manual actions.
-### Upgrade to 1.3.12
+### Upgrade to 1.3.13
 ```bash
-kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.12/templates/crds/trafficreplays.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.3.13/templates/crds/trafficreplays.yaml
 ```
 ### Upgrade to 1.1.0

View File

@@ -20,7 +20,7 @@ clusterName: "my-cluster"
 # Speedscale components image settings.
 image:
   registry: gcr.io/speedscale
-  tag: v1.3.94
+  tag: v1.3.99
   pullPolicy: Always
 # Log level for Speedscale components.
View File

@@ -10,6 +10,13 @@ Manual edits are supported only below '## Change Log' and should be used
 exclusively to fix incorrect entries and not to add new ones.
 ## Change Log
+# v1.15.92
+### Chores
+* **sysdig, node-analyzer** [f9c8e102](https://github.com/sysdiglabs/charts/commit/f9c8e1028b4b4e79aba10e72f762f14956d54273): bump sysdig/vuln-runtime-scanner to v1.5 ([#1160](https://github.com/sysdiglabs/charts/issues/1160))
+  * Runtime Scanner bumped to 1.5
+  * Updated the analyzer to also inspect vendor paths
+  * Added support for applying image-based accepts to all versions of an image, to an image in a specific registry & repository, or via a substring match, for customized subsets of the environment
 # v1.15.91
 ### Chores
 * **sysdig, node-analyzer** [601a0685](https://github.com/sysdiglabs/charts/commit/601a0685639cefc10ec9120c74153f4bb2ac2473): bump sysdig/vuln-runtime-scanner to v1.4.12 ([#1127](https://github.com/sysdiglabs/charts/issues/1127))

View File

@@ -29,4 +29,4 @@ name: sysdig
 sources:
 - https://app.sysdigcloud.com/#/settings/user
 - https://github.com/draios/sysdig
-version: 1.15.91
+version: 1.15.92

View File

@@ -194,7 +194,7 @@ The following table lists the configurable parameters of the Sysdig chart and th
 | `nodeAnalyzer.runtimeScanner.deploy` | Deploy the Runtime Scanner | `false` |
 | `nodeAnalyzer.runtimeScanner.extraMounts` | Specify a container engine custom socket path (docker, containerd, CRI-O) | |
 | `nodeAnalyzer.runtimeScanner.image.repository` | The image repository to pull the Runtime Scanner from | `sysdig/vuln-runtime-scanner` |
-| `nodeAnalyzer.runtimeScanner.image.tag` | The image tag to pull the Runtime Scanner | `1.4.12` |
+| `nodeAnalyzer.runtimeScanner.image.tag` | The image tag to pull the Runtime Scanner | `1.5` |
 | `nodeAnalyzer.runtimeScanner.image.digest` | The image digest to pull | ` ` |
 | `nodeAnalyzer.runtimeScanner.image.pullPolicy` | The image pull policy for the Runtime Scanner | `IfNotPresent` |
 | `nodeAnalyzer.runtimeScanner.resources.requests.cpu` | Runtime Scanner CPU requests per node | `250m` |
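
The Runtime Scanner stays opt-in (`deploy: false`), so the bumped `1.5` tag only takes effect once it is enabled. A sketch, assuming the repository alias `sysdig` and a placeholder access key:

```bash
helm install sysdig-agent sysdig/sysdig \
  --set sysdig.accessKey=YOUR-ACCESS-KEY \
  --set nodeAnalyzer.runtimeScanner.deploy=true
```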

View File

@@ -1,6 +1,10 @@
 # What's Changed
 ### Chores
-- **sysdig, node-analyzer** [601a0685](https://github.com/sysdiglabs/charts/commit/601a0685639cefc10ec9120c74153f4bb2ac2473): bump sysdig/vuln-runtime-scanner to v1.4.12 ([#1127](https://github.com/sysdiglabs/charts/issues/1127))
-#### Full diff: https://github.com/sysdiglabs/charts/compare/sysdig-deploy-1.8.14...sysdig-1.15.91
+- **sysdig, node-analyzer** [f9c8e102](https://github.com/sysdiglabs/charts/commit/f9c8e1028b4b4e79aba10e72f762f14956d54273): bump sysdig/vuln-runtime-scanner to v1.5 ([#1160](https://github.com/sysdiglabs/charts/issues/1160))
+  * Runtime Scanner bumped to 1.5
+  * Updated the analyzer to also inspect vendor paths
+  * Added support for applying image-based accepts to all versions of an image, to an image in a specific registry & repository, or via a substring match, for customized subsets of the environment
+#### Full diff: https://github.com/sysdiglabs/charts/compare/sysdig-deploy-1.8.21...sysdig-1.15.92

View File

@@ -505,7 +505,7 @@ nodeAnalyzer:
     deploy: false
     image:
       repository: sysdig/vuln-runtime-scanner
-      tag: 1.4.12
+      tag: 1.5
       digest:
       pullPolicy: IfNotPresent

View File

@@ -7865,6 +7865,35 @@ entries:
- assets/codefresh/cf-runtime-0.1.401.tgz
version: 0.1.401
chronicle:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Chronicle
catalog.cattle.io/release-name: chronicle
apiVersion: v2
appVersion: 0.7.3
created: "2023-06-14T14:15:40.570407012Z"
dependencies:
- name: standard-defs
repository: https://btp-charts-stable.s3.amazonaws.com/charts/
version: ~0.1.0
- name: sawtooth
repository: https://btp-charts-unstable.s3.amazonaws.com/charts/
version: ~0.2.0
description: 'Chronicle is an open-source, blockchain-backed, domain-agnostic
provenance product. Chronicle makes it easy for users to record and query immutable
provenance information on a distributed ledger - about any asset, in any domain,
and across multiple parties. '
digest: feb29bdf2b9e6e6da16b96085ce2015438dc02e00cc0758b38efa546eb2aec78
home: https://docs.btp.works/chronicle
icon: https://chronicle-resources.s3.amazonaws.com/icons/chronicle-transparent-bg-dark.png
keywords:
- provenance
- blockchain
name: chronicle
type: application
urls:
- assets/btp/chronicle-0.1.16.tgz
version: 0.1.16
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Chronicle
@@ -8733,6 +8762,27 @@ entries:
- assets/cloudcasa/cloudcasa-0.1.000.tgz
version: 0.1.000
cockroachdb:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: CockroachDB
catalog.cattle.io/kube-version: '>=1.8-0'
catalog.cattle.io/release-name: cockroachdb
apiVersion: v1
appVersion: 23.1.3
created: "2023-06-14T14:15:40.70472499Z"
description: CockroachDB is a scalable, survivable, strongly-consistent SQL database.
digest: 482154d4778d01e1de8b54cd0c8c6042bd9e504d0b9673dd5b68bae164853416
home: https://www.cockroachlabs.com
icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png
maintainers:
- email: helm-charts@cockroachlabs.com
name: cockroachlabs
name: cockroachdb
sources:
- https://github.com/cockroachdb/cockroach
urls:
- assets/cockroach-labs/cockroachdb-11.0.2.tgz
version: 11.0.2
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: CockroachDB
@@ -11167,6 +11217,43 @@ entries:
- assets/weka/csi-wekafsplugin-0.6.400.tgz
version: 0.6.400
datadog:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Datadog
catalog.cattle.io/kube-version: '>=1.10-0'
catalog.cattle.io/release-name: datadog
apiVersion: v1
appVersion: "7"
created: "2023-06-14T14:15:41.125445286Z"
dependencies:
- condition: clusterAgent.metricsProvider.useDatadogMetrics
name: datadog-crds
repository: https://helm.datadoghq.com
tags:
- install-crds
version: 0.4.7
- condition: datadog.kubeStateMetricsEnabled
name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 2.13.2
description: Datadog Agent
digest: 07ca5f6457648737bfa448cefe9342c913e8e28e0f8cd0f661b9051a10b4e1c4
home: https://www.datadoghq.com
icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png
keywords:
- monitoring
- alerting
- metric
maintainers:
- email: support@datadoghq.com
name: Datadog
name: datadog
sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-agent
urls:
- assets/datadog/datadog-3.32.2.tgz
version: 3.32.2
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Datadog
@@ -16926,6 +17013,36 @@ entries:
- assets/hpe/hpe-flexvolume-driver-3.1.000.tgz
version: 3.1.000
instana-agent:
- annotations:
artifacthub.io/links: |
- name: Instana website
url: https://www.instana.com
- name: Instana Helm charts
url: https://github.com/instana/helm-charts
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Instana Agent
catalog.cattle.io/kube-version: '>=1.21-0'
catalog.cattle.io/release-name: instana-agent
apiVersion: v2
appVersion: 1.251.0
created: "2023-06-14T14:15:41.731845611Z"
description: Instana Agent for Kubernetes
digest: 39176253b97b7cd4bed9f48ce52720bd80dbdb12922e0be65cfca63b65a620b6
home: https://www.instana.com/
icon: https://agents.instana.io/helm/stan-logo-2020.png
maintainers:
- email: felix.marx@ibm.com
name: FelixMarxIBM
- email: henning.treu@ibm.com
name: htreu
- email: torsten.kohn@ibm.com
name: tkohn
name: instana-agent
sources:
- https://github.com/instana/instana-agent-docker
urls:
- assets/instana/instana-agent-1.2.60.tgz
version: 1.2.60
- annotations:
artifacthub.io/links: |
- name: Instana website
@@ -21689,6 +21806,38 @@ entries:
- assets/bitnami/kafka-19.0.1.tgz
version: 19.0.1
kamaji:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Kamaji
catalog.cattle.io/kube-version: '>=1.21.0-0'
catalog.cattle.io/release-name: kamaji
apiVersion: v2
appVersion: v0.3.0
created: "2023-06-14T14:15:40.666733831Z"
description: Kamaji deploys and operates Kubernetes at scale with a fraction of
the operational burden. Kamaji turns any Kubernetes cluster into an “admin cluster”
to orchestrate other Kubernetes clusters called “tenant clusters”. Kamaji is
special because the Control Plane components are running in a single pod instead
of dedicated machines. This solution makes running multiple Control Planes cheaper
and easier to deploy and operate.
digest: 263709132b5e48d91aa11be3b04a074e2517fc1a91784e84744a627aafb788fe
home: https://github.com/clastix/kamaji
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
kubeVersion: '>=1.21.0-0'
maintainers:
- email: dario@tranchitella.eu
name: Dario Tranchitella
- email: me@maxgio.it
name: Massimiliano Giovagnoli
- email: me@bsctl.io
name: Adriano Pezzuto
name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
urls:
- assets/clastix/kamaji-0.12.1.tgz
version: 0.12.1
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Kamaji - Managed Kubernetes Service
@@ -31284,6 +31433,30 @@ entries:
- assets/percona/psmdb-operator-1.13.1.tgz
version: 1.13.1
pxc-db:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Percona XtraDB Cluster
catalog.cattle.io/kube-version: '>=1.21-0'
catalog.cattle.io/release-name: pxc-db
apiVersion: v2
appVersion: 1.12.0
created: "2023-06-14T14:15:43.98981563Z"
description: A Helm chart for installing Percona XtraDB Cluster Databases using
the PXC Operator.
digest: 64669f00a173d72a925317d9d404ee230c949d6bf78867bded7bff8f571d3d03
home: https://www.percona.com/doc/kubernetes-operator-for-pxc/kubernetes.html
icon: https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/main/operator.png
maintainers:
- email: tomislav.plavcic@percona.com
name: tplavcic
- email: sergey.pronin@percona.com
name: spron-in
- email: natalia.marukovich@percona.com
name: nmarukovich
name: pxc-db
urls:
- assets/percona/pxc-db-1.12.3.tgz
version: 1.12.3
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Percona XtraDB Cluster
@@ -31363,6 +31536,31 @@ entries:
- assets/percona/pxc-db-1.12.0.tgz
version: 1.12.0
pxc-operator:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Percona Operator For MySQL based on Percona
XtraDB Cluster
catalog.cattle.io/kube-version: '>=1.21-0'
catalog.cattle.io/release-name: pxc-operator
apiVersion: v2
appVersion: 1.12.0
created: "2023-06-14T14:15:43.993732788Z"
description: A Helm chart for deploying the Percona Operator for MySQL (based
on Percona XtraDB Cluster)
digest: 087acbc70e4e9b3304e212bba786d496d901b0170ad3355d7e99d75a73bee5ff
home: https://docs.percona.com/percona-operator-for-mysql/pxc/
icon: https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/main/operator.png
maintainers:
- email: tomislav.plavcic@percona.com
name: tplavcic
- email: natalia.marukovich@percona.com
name: nmarukovich
- email: sergey.pronin@percona.com
name: spron-in
name: pxc-operator
urls:
- assets/percona/pxc-operator-1.12.2.tgz
version: 1.12.2
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Percona Operator For MySQL based on Percona
@@ -31511,6 +31709,41 @@ entries:
- assets/quobyte/quobyte-cluster-0.1.5.tgz
version: 0.1.5
redis:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Redis
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: redis
category: Database
licenses: Apache-2.0
apiVersion: v2
appVersion: 7.0.11
created: "2023-06-14T14:15:39.629388655Z"
dependencies:
- name: common
repository: file://./charts/common
tags:
- bitnami-common
version: 2.x.x
description: Redis(R) is an open source, advanced key-value store. It is often
referred to as a data structure server since keys can contain strings, hashes,
lists, sets and sorted sets.
digest: 060e2938f8ea4de7952f7cb1d8450954fe49bfb692215ee76520a28a94f26a6f
home: https://bitnami.com
icon: https://redis.com/wp-content/uploads/2021/08/redis-logo.png
keywords:
- redis
- keyvalue
- database
maintainers:
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: redis
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/redis
urls:
- assets/bitnami/redis-17.11.4.tgz
version: 17.11.4
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Redis
@@ -32619,6 +32852,44 @@ entries:
- assets/bitnami/redis-17.3.7.tgz
version: 17.3.7
redpanda:
- annotations:
artifacthub.io/images: |
- name: redpanda
image: docker.redpanda.com/redpandadata/redpanda:v23.1.10
- name: busybox
image: busybox:latest
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Documentation
url: https://docs.redpanda.com
- name: "Helm (>= 3.6.0)"
url: https://helm.sh/docs/intro/install/
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Redpanda
catalog.cattle.io/kube-version: '>=1.21-0'
catalog.cattle.io/release-name: redpanda
apiVersion: v2
appVersion: v23.1.12
created: "2023-06-14T14:15:44.220653526Z"
dependencies:
- condition: console.enabled
name: console
repository: file://./charts/console
version: '>=0.5 <1.0'
description: Redpanda is the real-time engine for modern apps.
digest: b8bc68c1b94d5b46c0759f0a40ed36e0c486fa16575709c7fb4ed43f45fc8dc9
icon: https://images.ctfassets.net/paqvtpyf8rwu/3cYHw5UzhXCbKuR24GDFGO/73fb682e6157d11c10d5b2b5da1d5af0/skate-stand-panda.svg
kubeVersion: '>=1.21-0'
maintainers:
- name: redpanda-data
url: https://github.com/orgs/redpanda-data/people
name: redpanda
sources:
- https://github.com/redpanda-data/helm-charts
type: application
urls:
- assets/redpanda/redpanda-4.0.33.tgz
version: 4.0.33
- annotations:
artifacthub.io/images: |
- name: redpanda
@@ -35975,6 +36246,37 @@ entries:
- assets/bitnami/spark-6.3.8.tgz
version: 6.3.8
speedscale-operator:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Speedscale Operator
catalog.cattle.io/kube-version: '>= 1.17.0-0'
catalog.cattle.io/release-name: speedscale-operator
apiVersion: v1
appVersion: 1.3.99
created: "2023-06-14T14:15:44.313961099Z"
description: Stress test your APIs with real world scenarios. Collect and replay
traffic without scripting.
digest: 8e0d0294fd5664a26196ef61b1784900ce0f72b474446f4cd750001b02a8a819
home: https://speedscale.com
icon: https://raw.githubusercontent.com/speedscale/assets/main/logo/gold_logo_only.png
keywords:
- speedscale
- test
- testing
- regression
- reliability
- load
- replay
- network
- traffic
kubeVersion: '>= 1.17.0-0'
maintainers:
- email: support@speedscale.com
name: Speedscale Support
name: speedscale-operator
urls:
- assets/speedscale/speedscale-operator-1.3.13.tgz
version: 1.3.13
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Speedscale Operator
@@ -37329,6 +37631,42 @@ entries:
- assets/sumologic/sumologic-2.17.0.tgz
version: 2.17.0
sysdig:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Sysdig
catalog.cattle.io/release-name: sysdig
apiVersion: v1
appVersion: 12.14.1
created: "2023-06-14T14:15:44.589942059Z"
description: Sysdig Monitor and Secure agent
digest: 05e4d4a2bf8490c273f7cfafd89de066b56378422fa35a6817a4d9080d73a3e2
home: https://www.sysdig.com/
icon: https://avatars.githubusercontent.com/u/5068817?s=200&v=4
keywords:
- monitoring
- security
- alerting
- metric
- troubleshooting
- run-time
maintainers:
- email: lachlan@deis.com
name: lachie83
- email: jorge.salamero@sysdig.com
name: bencer
- email: nestor.salceda@sysdig.com
name: nestorsalceda
- email: alvaro.iradier@sysdig.com
name: airadier
- email: carlos.arilla@sysdig.com
name: carillan81
name: sysdig
sources:
- https://app.sysdigcloud.com/#/settings/user
- https://github.com/draios/sysdig
urls:
- assets/sysdig/sysdig-1.15.92.tgz
version: 1.15.92
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Sysdig
@@ -38370,6 +38708,44 @@ entries:
- assets/intel/tcs-issuer-0.1.0.tgz
version: 0.1.0
tomcat:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Apache Tomcat
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: tomcat
category: ApplicationServer
licenses: Apache-2.0
apiVersion: v2
appVersion: 10.1.10
created: "2023-06-14T14:15:39.942976673Z"
dependencies:
- name: common
repository: file://./charts/common
tags:
- bitnami-common
version: 2.x.x
description: Apache Tomcat is an open-source web server designed to host and run
Java-based web applications. It is a lightweight server with a good performance
for applications running in production environments.
digest: b0ffb1d66025d4cc4213877ce0047f8d833397f147537ab438056407401ec36c
home: https://bitnami.com
icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/tomcat.svg
keywords:
- tomcat
- java
- http
- web
- application server
- jsp
maintainers:
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: tomcat
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/tomcat
urls:
- assets/bitnami/tomcat-10.9.3.tgz
version: 10.9.3
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Apache Tomcat