Charts CI

```
Updated:
  argo/argo-cd:
    - 5.33.4
  avesha/kubeslice-controller:
    - 0.10.0
  avesha/kubeslice-worker:
    - 0.10.0
  bitnami/airflow:
    - 14.2.1
  bitnami/postgresql:
    - 12.5.2
  bitnami/spark:
    - 6.6.1
  bitnami/zookeeper:
    - 11.4.1
  cockroach-labs/cockroachdb:
    - 11.0.0
  datadog/datadog:
    - 3.29.1
  instana/instana-agent:
    - 1.2.59
  kubecost/cost-analyzer:
    - 1.103.3
  mongodb/community-operator:
    - 0.8.0
  new-relic/nri-bundle:
    - 5.0.13
  pixie/pixie-operator-chart:
    - 0.1.0
  speedscale/speedscale-operator:
    - 1.3.2
```

pull/759/head
parent de6fcfc7f7
commit 5bc0982c98
16 binary files (packaged chart archives) not shown.
@@ -1,6 +1,6 @@
 dependencies:
 - name: redis-ha
   repository: https://dandydeveloper.github.io/charts/
-  version: 4.22.5
-digest: sha256:d2e927511e515fb862f23dd413ee3a356c855d808f6f9ad1d345ee62b8c7ea16
-generated: "2023-03-30T08:25:32.738257836+02:00"
+  version: 4.23.0
+digest: sha256:589f9972fbdf36194d443c9d3be2a1747f43e03c435fc48004cc0cbe6b3c6e3c
+generated: "2023-05-15T19:25:26.049618+09:00"
@@ -1,7 +1,7 @@
 annotations:
   artifacthub.io/changes: |
-    - kind: added
-      description: Ability to add additional labels to all CRDs via .Values.crds.additionalLabels
+    - kind: changed
+      description: Update redis-ha to v4.23.0
   artifacthub.io/signKey: |
     fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252
     url: https://argoproj.github.io/argo-helm/pgp_keys.asc
@@ -10,12 +10,12 @@ annotations:
   catalog.cattle.io/kube-version: '>=1.22.0-0'
   catalog.cattle.io/release-name: argo-cd
 apiVersion: v2
-appVersion: v2.7.1
+appVersion: v2.7.2
 dependencies:
 - condition: redis-ha.enabled
   name: redis-ha
   repository: file://./charts/redis-ha
-  version: 4.22.5
+  version: 4.23.0
 description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery tool
   for Kubernetes.
 home: https://github.com/argoproj/argo-helm
@@ -32,4 +32,4 @@ name: argo-cd
 sources:
 - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
 - https://github.com/argoproj/argo-cd
-version: 5.33.1
+version: 5.33.4
@@ -1024,6 +1024,19 @@ If you want to use an existing Redis (eg. a managed service from a cloud provide
 |-----|------|---------|-------------|
 | applicationSet.affinity | object | `{}` (defaults to global.affinity preset) | Assign custom [affinity] rules |
 | applicationSet.args | object | `{}` | DEPRECATED - ApplicationSet controller command line flags |
+| applicationSet.certificate.additionalHosts | list | `[]` | Certificate Subject Alternate Names (SANs) |
+| applicationSet.certificate.domain | string | `"argocd.example.com"` | Certificate primary domain (commonName) |
+| applicationSet.certificate.duration | string | `""` (defaults to 2160h = 90d if not specified) | The requested 'duration' (i.e. lifetime) of the certificate. |
+| applicationSet.certificate.enabled | bool | `false` | Deploy a Certificate resource (requires cert-manager) |
+| applicationSet.certificate.issuer.group | string | `""` | Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io` |
+| applicationSet.certificate.issuer.kind | string | `""` | Certificate issuer kind. Either `Issuer` or `ClusterIssuer` |
+| applicationSet.certificate.issuer.name | string | `""` | Certificate issuer name. Eg. `letsencrypt` |
+| applicationSet.certificate.privateKey.algorithm | string | `"RSA"` | Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA` |
+| applicationSet.certificate.privateKey.encoding | string | `"PKCS1"` | The private key cryptography standards (PKCS) encoding for private key. Either: `PCKS1` or `PKCS8` |
+| applicationSet.certificate.privateKey.rotationPolicy | string | `"Never"` | Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always` |
+| applicationSet.certificate.privateKey.size | int | `2048` | Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored. |
+| applicationSet.certificate.renewBefore | string | `""` (defaults to 360h = 15d if not specified) | How long before the expiry a certificate should be renewed. |
+| applicationSet.certificate.secretName | string | `"argocd-application-controller-tls"` | The name of the Secret that will be automatically created and managed by this Certificate resource |
 | applicationSet.containerPorts.metrics | int | `8080` | Metrics container port |
 | applicationSet.containerPorts.probe | int | `8081` | Probe container port |
 | applicationSet.containerPorts.webhook | int | `7000` | Webhook container port |
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 7.0.4
+appVersion: 7.0.9
 description: This Helm chart provides a highly available Redis implementation with
   a master/slave configuration and uses Sentinel sidecars for failover management
 home: http://redis.io/
@@ -18,4 +18,4 @@ sources:
 - https://redis.io/download
 - https://github.com/DandyDeveloper/charts/blob/master/charts/redis-ha
 - https://github.com/oliver006/redis_exporter
-version: 4.22.5
+version: 4.23.0
@@ -9,7 +9,7 @@ To connect to your Redis server:

 2. Connect to the Redis master pod that you can use as a client. By default the {{ template "redis-ha.fullname" . }}-server-0 pod is configured as the master:

-   kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }}
+   kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 -n {{ .Release.Namespace }} -c redis -- sh

 3. Connect using the Redis CLI (inside container):

@@ -17,7 +17,7 @@ To connect to your Redis server:
 {{- else }}
 1. Run a Redis pod that you can use as a client:

-   kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }}
+   kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 -n {{ .Release.Namespace }} -c redis -- sh

 2. Connect using the Redis CLI:
@ -80,7 +80,7 @@
|
|||
sentinel_get_master() {
|
||||
set +e
|
||||
if [ "$SENTINEL_PORT" -eq 0 ]; then
|
||||
redis-cli -h "${SERVICE}" -p "${SENTINEL_TLS_PORT}" {{ if .Values.sentinel.auth }} -a "${SENTINELAUTH}" --no-auth-warning{{ end }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} sentinel get-master-addr-by-name "${MASTER_GROUP}" |\
|
||||
redis-cli -h "${SERVICE}" -p "${SENTINEL_TLS_PORT}" {{ if .Values.sentinel.auth }} -a "${SENTINELAUTH}" --no-auth-warning{{ end }} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} sentinel get-master-addr-by-name "${MASTER_GROUP}" |\
|
||||
grep -E '((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?s*$))'
|
||||
else
|
||||
redis-cli -h "${SERVICE}" -p "${SENTINEL_PORT}" {{ if .Values.sentinel.auth }} -a "${SENTINELAUTH}" --no-auth-warning{{ end }} sentinel get-master-addr-by-name "${MASTER_GROUP}" |\
|
||||
|
@ -189,7 +189,7 @@
|
|||
redis_ping() {
|
||||
set +e
|
||||
if [ "$REDIS_PORT" -eq 0 ]; then
|
||||
redis-cli -h "${MASTER}"{{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} ping
|
||||
redis-cli -h "${MASTER}"{{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} ping
|
||||
else
|
||||
redis-cli -h "${MASTER}"{{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_PORT}" ping
|
||||
fi
|
||||
|
@ -224,7 +224,7 @@
|
|||
|
||||
if [ "$SENTINEL_PORT" -eq 0 ]; then
|
||||
echo " on sentinel (${SERVICE}:${SENTINEL_TLS_PORT}), sentinel grp (${MASTER_GROUP})"
|
||||
if redis-cli -h "${SERVICE}" -p "${SENTINEL_TLS_PORT}" {{ if .Values.sentinel.auth }} -a "${SENTINELAUTH}" --no-auth-warning{{ end }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} sentinel failover "${MASTER_GROUP}" | grep -q 'NOGOODSLAVE' ; then
|
||||
if redis-cli -h "${SERVICE}" -p "${SENTINEL_TLS_PORT}" {{ if .Values.sentinel.auth }} -a "${SENTINELAUTH}" --no-auth-warning{{ end }} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} sentinel failover "${MASTER_GROUP}" | grep -q 'NOGOODSLAVE' ; then
|
||||
echo " $(date) Failover returned with 'NOGOODSLAVE'"
|
||||
echo "Setting defaults for this pod.."
|
||||
setup_defaults
|
||||
|
@ -361,7 +361,7 @@
|
|||
|
||||
{{- define "trigger-failover-if-master.sh" }}
|
||||
{{- if or (eq (int .Values.redis.port) 0) (eq (int .Values.sentinel.port) 0) }}
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}"
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }}{{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{end}}"
|
||||
{{- end }}
|
||||
get_redis_role() {
|
||||
is_master=$(
|
||||
|
@ -422,7 +422,7 @@
|
|||
redis_role() {
|
||||
set +e
|
||||
if [ "$REDIS_PORT" -eq 0 ]; then
|
||||
ROLE=$(redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} info | grep role | sed 's/role://' | sed 's/\r//')
|
||||
ROLE=$(redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} info | grep role | sed 's/role://' | sed 's/\r//')
|
||||
else
|
||||
ROLE=$(redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_PORT}" info | grep role | sed 's/role://' | sed 's/\r//')
|
||||
fi
|
||||
|
@ -432,7 +432,7 @@
|
|||
identify_redis_master() {
|
||||
set +e
|
||||
if [ "$REDIS_PORT" -eq 0 ]; then
|
||||
REDIS_MASTER=$(redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} info | grep master_host | sed 's/master_host://' | sed 's/\r//')
|
||||
REDIS_MASTER=$(redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }} info | grep master_host | sed 's/master_host://' | sed 's/\r//')
|
||||
else
|
||||
REDIS_MASTER=$(redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_PORT}" info | grep master_host | sed 's/master_host://' | sed 's/\r//')
|
||||
fi
|
||||
|
@ -444,7 +444,7 @@
|
|||
sh /readonly-config/init.sh
|
||||
|
||||
if [ "$REDIS_PORT" -eq 0 ]; then
|
||||
echo "shutdown" | redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }}
|
||||
echo "shutdown" | redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_TLS_PORT}" --tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} {{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{ end }}
|
||||
else
|
||||
echo "shutdown" | redis-cli {{ if .Values.auth }} -a "${AUTH}" --no-auth-warning{{ end }} -p "${REDIS_PORT}"
|
||||
fi
|
||||
|
@ -621,7 +621,7 @@
|
|||
|
||||
{{- define "redis_liveness.sh" }}
|
||||
{{- if not (ne (int .Values.sentinel.port) 0) }}
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}"
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }}{{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{end}}"
|
||||
{{- end }}
|
||||
response=$(
|
||||
redis-cli \
|
||||
|
@ -645,7 +645,7 @@
|
|||
|
||||
{{- define "redis_readiness.sh" }}
|
||||
{{- if not (ne (int .Values.sentinel.port) 0) }}
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}"
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }}{{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{end}}"
|
||||
{{- end }}
|
||||
response=$(
|
||||
redis-cli \
|
||||
|
@ -669,7 +669,7 @@
|
|||
|
||||
{{- define "sentinel_liveness.sh" }}
|
||||
{{- if not (ne (int .Values.sentinel.port) 0) }}
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}"
|
||||
TLS_CLIENT_OPTION="--tls --cacert /tls-certs/{{ .Values.tls.caCertFile }}{{ if ne (default "yes" .Values.sentinel.authClients) "no"}} --cert /tls-certs/{{ .Values.tls.certFile }} --key /tls-certs/{{ .Values.tls.keyFile }}{{end}}"
|
||||
{{- end }}
|
||||
response=$(
|
||||
redis-cli \
|
||||
|
|
|
@@ -15,12 +15,16 @@ metadata:
     {{ $key }}: {{ $value | quote }}
 {{- end }}
   annotations:
+  {{- if (semverCompare "<=1.10-0" $.Capabilities.KubeVersion.GitVersion) }}
     service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+  {{- end }}
 {{- if $root.Values.serviceAnnotations }}
 {{ toYaml $root.Values.serviceAnnotations | indent 4 }}
 {{- end }}
 spec:
+  {{- if (semverCompare ">=1.11-0" $.Capabilities.KubeVersion.GitVersion) }}
   publishNotReadyAddresses: true
+  {{- end }}
   type: ClusterIP
   ports:
 {{- if ne (int $root.Values.redis.port) 0 }}
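For context, the two `semverCompare` guards above mean that on Kubernetes 1.11 or newer the rendered announce Service carries `publishNotReadyAddresses: true` instead of the legacy annotation. A minimal sketch of the rendered output, assuming a modern cluster (the resource name is illustrative, not taken from the chart):

```yaml
# Sketch only: roughly what the hunk above renders on Kubernetes >= 1.11.
apiVersion: v1
kind: Service
metadata:
  name: example-redis-ha-announce-0   # illustrative name
  annotations: {}                     # tolerate-unready-endpoints is emitted only on <= 1.10
spec:
  publishNotReadyAddresses: true
  type: ClusterIP
```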
@@ -3,7 +3,7 @@
 ##
 image:
   repository: redis
-  tag: 7.0.5-alpine3.16
+  tag: 7.0.9-alpine3.17
   pullPolicy: IfNotPresent

 ## Reference to one or more secrets to be used when pulling images
@@ -81,7 +81,7 @@ haproxy:
   replicas: 3
   image:
     repository: haproxy
-    tag: 2.6.4
+    tag: 2.6.9
     pullPolicy: IfNotPresent

   ## Custom labels for the haproxy pod
@@ -0,0 +1,33 @@
+{{- if .Values.applicationSet.certificate.enabled -}}
+apiVersion: {{ include "argo-cd.apiVersion.cert-manager" . }}
+kind: Certificate
+metadata:
+  name: {{ template "argo-cd.applicationSet.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels:
+    {{- include "argo-cd.labels" (dict "context" . "component" .Values.applicationSet.name "name" .Values.applicationSet.name) | nindent 4 }}
+spec:
+  secretName: {{ .Values.applicationSet.certificate.secretName }}
+  commonName: {{ .Values.applicationSet.certificate.domain | quote }}
+  dnsNames:
+    - {{ .Values.applicationSet.certificate.domain | quote }}
+    {{- range .Values.applicationSet.certificate.additionalHosts }}
+    - {{ . | quote }}
+    {{- end }}
+  {{- with .Values.applicationSet.certificate.duration }}
+  duration: {{ . | quote }}
+  {{- end }}
+  {{- with .Values.applicationSet.certificate.renewBefore }}
+  renewBefore: {{ . | quote }}
+  {{- end }}
+  issuerRef:
+    {{- with .Values.applicationSet.certificate.issuer.group }}
+    group: {{ . | quote }}
+    {{- end }}
+    kind: {{ .Values.applicationSet.certificate.issuer.kind | quote }}
+    name: {{ .Values.applicationSet.certificate.issuer.name | quote }}
+  {{- with .Values.applicationSet.certificate.privateKey }}
+  privateKey:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
@@ -2623,6 +2623,45 @@ applicationSet:
     # hosts:
     #   - argocd-applicationset.example.com

+  # TLS certificate configuration via cert-manager
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-configuration
+  certificate:
+    # -- Deploy a Certificate resource (requires cert-manager)
+    enabled: false
+    # -- The name of the Secret that will be automatically created and managed by this Certificate resource
+    secretName: argocd-application-controller-tls
+    # -- Certificate primary domain (commonName)
+    domain: argocd.example.com
+    # -- Certificate Subject Alternate Names (SANs)
+    additionalHosts: []
+    # -- The requested 'duration' (i.e. lifetime) of the certificate.
+    # @default -- `""` (defaults to 2160h = 90d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    duration: ""
+    # -- How long before the expiry a certificate should be renewed.
+    # @default -- `""` (defaults to 360h = 15d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    renewBefore: ""
+    # Certificate issuer
+    ## Ref: https://cert-manager.io/docs/concepts/issuer
+    issuer:
+      # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io`
+      group: ""
+      # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer`
+      kind: ""
+      # -- Certificate issuer name. Eg. `letsencrypt`
+      name: ""
+    # Private key of the certificate
+    privateKey:
+      # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always`
+      rotationPolicy: Never
+      # -- The private key cryptography standards (PKCS) encoding for private key. Either: `PCKS1` or `PKCS8`
+      encoding: PKCS1
+      # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA`
+      algorithm: RSA
+      # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+      size: 2048
+
   ## Notifications controller
   notifications:
     # -- Enable notifications controller
@@ -5,7 +5,7 @@ annotations:
   catalog.cattle.io/namespace: kubeslice-controller
   catalog.cattle.io/release-name: kubeslice-controller
 apiVersion: v2
-appVersion: 0.5.0
+appVersion: 0.10.0
 description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking
   tool for efficient, secure, policy-enforced connectivity and true multi-tenancy
   capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure
@@ -36,4 +36,4 @@ keywords:
 kubeVersion: '>= 1.19.0-0'
 name: kubeslice-controller
 type: application
-version: 0.5.0
+version: 0.10.0
@@ -1,13 +1,13 @@
 # Kubeslice Enterprise Controller Helm Charts

 ## Prerequisites
-📖 Follow the overview and registration [documentation](https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/)
+📖 Follow the overview and registration [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/)

-- Create and configure the controller cluster following instructions in the prerequisites section [documentation](https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher)
+- Create and configure the controller cluster following instructions in the prerequisites section [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher)
 - Copy the chart version from the upper right hand section of this page [VERSION parameter need during install and upgrade]
 - Click on the download chart link from the upper right hand section of this page, save it to location available from command prompt
 - Untar the chart to get the values.yaml file, update values.yaml with the follwing information
-  - cluster end point [documentation](https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher#getting-the-controller-cluster-endpoint)
+  - cluster end point [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher#getting-the-controller-cluster-endpoint)
   - helm repository username, password and email [From registration]
@@ -2,7 +2,7 @@
 questions:
 -
   default: ""
-  description: "https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/#registering-to-access-the-enterprise-helm-chart"
+  description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/#registering-to-access-the-enterprise-helm-chart"
   group: "Global Settings"
   label: "Registered Username"
   required: true
@@ -18,7 +18,7 @@ questions:
   variable: imagePullSecrets.password
 -
   default: ""
-  description: "https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher/#getting-the-controller-cluster-endpoint"
+  description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher/#getting-the-controller-cluster-endpoint"
   group: "Controller Settings"
   label: "Controller Endpoint"
   required: true
@ -0,0 +1,456 @@
|
|||
{{- $ca := genCA "kubeslice-controller-webhook-service" 3650 -}}
|
||||
{{- $cn := printf "kubeslice-controller-webhook-service" -}}
|
||||
{{- $altName1 := printf "%s.%s.svc" $cn .Release.Namespace }}
|
||||
{{- $altName2 := printf "%s.%s.svc.cluster.local" $cn .Release.Namespace }}
|
||||
{{- $cert := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca -}}
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: webhook-server-cert-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
ca.crt: {{ $ca.Cert | b64enc }}
|
||||
tls.key: {{ $cert.Key | b64enc }}
|
||||
tls.crt: {{ $cert.Cert | b64enc }}
|
||||
|
||||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: kubeslice-controller-validating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-slicenodeaffinity
|
||||
failurePolicy: Fail
|
||||
name: vslicenodeaffinity.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- slicenodeaffinities
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-sliceresourcequotaconfig
|
||||
failurePolicy: Fail
|
||||
name: vsliceresourcequotaconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- sliceresourcequotaconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-slicerolebinding
|
||||
failurePolicy: Fail
|
||||
name: vslicerolebinding.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- slicerolebindings
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-sliceroletemplate
|
||||
failurePolicy: Fail
|
||||
name: vsliceroletemplate.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- sliceroletemplates
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-cluster
|
||||
failurePolicy: Fail
|
||||
name: vcluster.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- clusters
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-project
|
||||
failurePolicy: Fail
|
||||
name: vproject.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- projects
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-serviceexportconfig
|
||||
failurePolicy: Fail
|
||||
name: vserviceexportconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- serviceexportconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-sliceconfig
|
||||
failurePolicy: Fail
|
||||
name: vsliceconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- sliceconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-controller-kubeslice-io-v1alpha1-sliceqosconfig
|
||||
failurePolicy: Fail
|
||||
name: vsliceqosconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- sliceqosconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-worker-kubeslice-io-v1alpha1-workersliceconfig
|
||||
failurePolicy: Fail
|
||||
name: vworkersliceconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- worker.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- workersliceconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /validate-worker-kubeslice-io-v1alpha1-workerslicegateway
|
||||
failurePolicy: Fail
|
||||
name: vworkerslicegateway.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- worker.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- workerslicegateways
|
||||
sideEffects: None
|
||||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: kubeslice-controller-mutating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-controller-kubeslice-io-v1alpha1-sliceresourcequotaconfig
|
||||
failurePolicy: Fail
|
||||
name: msliceresourcequotaconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- sliceresourcequotaconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-controller-kubeslice-io-v1alpha1-slicerolebinding
|
||||
failurePolicy: Fail
|
||||
name: mslicerolebinding.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- slicerolebindings
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-controller-kubeslice-io-v1alpha1-cluster
|
||||
failurePolicy: Fail
|
||||
name: mcluster.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- clusters
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-controller-kubeslice-io-v1alpha1-project
|
||||
failurePolicy: Fail
|
||||
name: mproject.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- projects
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-controller-kubeslice-io-v1alpha1-serviceexportconfig
|
||||
failurePolicy: Fail
|
||||
name: mserviceexportconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- serviceexportconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-controller-kubeslice-io-v1alpha1-sliceconfig
|
||||
failurePolicy: Fail
|
||||
name: msliceconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- sliceconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-controller-kubeslice-io-v1alpha1-sliceqosconfig
|
||||
failurePolicy: Fail
|
||||
name: msliceqosconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- controller.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- sliceqosconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-worker-kubeslice-io-v1alpha1-workersliceconfig
|
||||
failurePolicy: Fail
|
||||
name: mworkersliceconfig.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- worker.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- workersliceconfigs
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
caBundle: {{ $ca.Cert | b64enc }}
|
||||
service:
|
||||
name: kubeslice-controller-webhook-service
|
||||
namespace: kubeslice-controller
|
||||
path: /mutate-worker-kubeslice-io-v1alpha1-workerslicegateway
|
||||
failurePolicy: Fail
|
||||
name: mworkerslicegateway.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- worker.kubeslice.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- workerslicegateways
|
||||
sideEffects: None
|
|
@ -0,0 +1,29 @@
|
|||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: kubeslice-controller-cleanup
|
||||
annotations:
|
||||
# This is what defines this resource as a hook. Without this line, the
|
||||
# job is considered part of the release.
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-delete-policy": hook-succeeded,hook-failed
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: kubeslice-controller-controller-manager
|
||||
containers:
|
||||
- name: cleanup
|
||||
image: '{{ .Values.kubeslice.controller.image }}:{{ .Values.kubeslice.controller.tag }}'
|
||||
imagePullPolicy: '{{ .Values.kubeslice.controller.pullPolicy }}'
|
||||
command:
|
||||
- /cleanup
|
||||
env:
|
||||
- name: KUBESLICE_CONTROLLER_MANAGER_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
imagePullSecrets:
|
||||
- name: kubeslice-image-pull-secret
|
||||
restartPolicy: Never
|
||||
backoffLimit: 1
|
|
@ -0,0 +1,284 @@
|
|||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
name: kubeslice-controller-manager
|
||||
namespace: kubeslice-controller
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
control-plane: controller-manager
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
kubectl.kubernetes.io/default-container: manager
|
||||
prometheus.io/port: "18080"
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --secure-listen-address=0.0.0.0:8443
|
||||
- --upstream=http://127.0.0.1:8080/
|
||||
- --logtostderr=true
|
||||
- --v=0
|
||||
image: '{{ .Values.kubeslice.rbacproxy.image }}:{{ .Values.kubeslice.rbacproxy.tag }}'
|
||||
name: kube-rbac-proxy
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
name: https
|
||||
protocol: TCP
|
||||
- args:
|
||||
- --health-probe-bind-address=:8081
|
||||
- --metrics-bind-address=127.0.0.1:8080
|
||||
- --leader-elect
|
||||
- --log-level={{ required "A valid value is required!" .Values.kubeslice.controller.logLevel }}
|
||||
- --rbac-resource-prefix={{ required "A valid value is required!" .Values.kubeslice.controller.rbacResourcePrefix }}
|
||||
- --project-namespace-prefix={{ required "A valid value is required!" .Values.kubeslice.controller.projectnsPrefix }}
|
||||
- --controller-end-point={{ required "A valid value is required!" .Values.kubeslice.controller.endpoint }}
|
||||
- --prometheus-service-endpoint={{ required "A valid value is required!" .Values.kubeslice.prometheus.url}}
|
||||
- --ovpn-job-image={{ .Values.kubeslice.ovpnJob.image }}:{{ .Values.kubeslice.ovpnJob.tag }}
|
||||
command:
|
||||
- /manager
|
||||
env:
|
||||
- name: KUBESLICE_CONTROLLER_MANAGER_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
image: '{{ .Values.kubeslice.controller.image }}:{{ .Values.kubeslice.controller.tag }}'
|
||||
imagePullPolicy: '{{ .Values.kubeslice.controller.pullPolicy }}'
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 20
|
||||
name: manager
|
||||
ports:
|
||||
- containerPort: 9443
|
||||
name: webhook-server
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 8081
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 64Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
||||
name: cert
|
||||
readOnly: true
|
||||
- name: kubeslice-controller-event-schema-conf
|
||||
mountPath: /events/event-schema/
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: kubeslice-controller-controller-manager
|
||||
terminationGracePeriodSeconds: 10
|
||||
volumes:
|
||||
- name: kubeslice-controller-event-schema-conf
|
||||
configMap:
|
||||
name: kubeslice-controller-event-schema-conf
|
||||
defaultMode: 420
|
||||
- name: cert
|
||||
secret:
|
||||
defaultMode: 420
|
||||
secretName: webhook-server-cert-secret
|
||||
{{- if and .Values.imagePullSecrets .Values.imagePullSecrets.repository .Values.imagePullSecrets.username .Values.imagePullSecrets.password }}
|
||||
imagePullSecrets:
|
||||
- name: kubeslice-image-pull-secret
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
{{ if .Values.kubeslice.events.disabled}}
|
||||
controller.yaml: |-
|
||||
disabledEvents:
|
||||
- DefaultDeploymentSliceRoleTemplateCreationFailed
|
||||
- DefaultDeploymentSliceRoleTemplateCreated
|
||||
- DefaultReaderSliceRoleTemplateCreationFailed
|
||||
- DefaultReaderSliceRoleTemplateCreated
|
||||
- SliceRoleTemplateDeletionFailed
|
||||
- SliceRoleTemplateDeleted
|
||||
- SliceResourceQuotaCreationFailed
|
||||
- SliceResourceQuotaCreatedOnSliceConfigCreation
|
||||
- SliceResourceQuotaDeletionFailed
|
||||
- SliceResourceQuotaDeleted
|
||||
- SliceResourceQuotaRecreationFailed
|
||||
- SliceResourceQuotaRecreated
|
||||
- SetSliceConfigAsOwnerOfSliceResourceQuotaFailed
|
||||
- SetSliceConfigAsOwnerOfSliceResourceQuotaSucceeded
|
||||
- SliceResourceQuotaCreated
|
||||
- SliceResourceQuotaUpdated
|
||||
- AllRQSpecificationViolationMetricsResetSuccess
|
||||
- SliceRQSpecificationViolationMetricsResetSuccess
|
||||
- ClusterRQSpecificationViolationMetricsResetSuccess
|
||||
- OutdatedNamespaceRQSpecificationViolationMetricsResetSuccess
|
||||
- OutdatedClusterRQSpecificationViolationMetricsResetSuccess
|
||||
- SliceNodeAffinityDeletionFailed
|
||||
- SliceNodeAffinityDeleted
|
||||
- SetSliceConfigAsOwnerOfSliceNodeAffinityFailed
|
||||
- SetSliceConfigAsOwnerOfSliceNodeAffinitySucceeded
|
||||
- SliceRoleBindingDeletingFailed
|
||||
- SliceRoleBindingDeleted
|
||||
- SetSliceConfigAsOwnerOfSliceRoleBindingFailed
|
||||
- SetSliceConfigAsOwnerOfSliceRoleBindingSucceeded
|
||||
- WorkerSliceRoleBindingReconciliationSuccess
|
||||
- WorkerSliceRoleBindingDeletedForcefully
|
||||
- WorkerSliceRoleBindingRecreationFailed
|
||||
- WorkerSliceRoleBindingRecreated
|
||||
- WorkerSliceRoleBindingCreationFailed
|
||||
- WorkerSliceRoleBindingCreated
|
||||
- WorkerSliceRoleBindingUpdateFailed
|
||||
- WorkerSliceRoleBindingUpdated
|
||||
- WorkerSliceRoleBindingDeletionFailed
|
||||
- WorkerSliceRoleBindingDeleted
|
||||
- WorkerSliceNodeAffinityDeletedForcefully
|
||||
- WorkerSliceNodeAffinityRecreationFailed
|
||||
- WorkerSliceNodeAffinityRecreated
|
||||
- NodeAffinityRilesExpansionFailed
|
||||
- SliceNodeAffinityConfigDeepCopyFailed
|
||||
- WorkerSliceNodeAffinityCreationFailed
|
||||
- WorkerSliceNodeAffinityCreated
|
||||
- WorkerSliceNodeAffinityUpdateFailed
|
||||
- WorkerSliceNodeAffinityUpdated
|
||||
- WorkerSliceNodeAffinityDeletionFailed
|
||||
- WorkerSliceNodeAffinityDeleted
|
||||
- WorkerSliceResourceQuotaDeletedForcefully
|
||||
- WorkerSliceResourceQuotaRecreationFailed
|
||||
- WorkerSliceResourceQuotaRecreated
|
||||
- OffBoardedNamespaceUtilizationMetricsReset
|
||||
- ResourceQuotaMetricsPopulated
|
||||
- ClusterCPULimitViolated
|
||||
- ClusterMemoryLimitViolated
|
||||
- ClusterPodCountViolated
|
||||
- ClusterEphemeralStorageLimitViolated
|
||||
- ClusterCPURequestViolated
|
||||
- ClusterMemoryRequestViolated
|
||||
- ClusterEphemeralStorageRequestViolated
|
||||
- NamespaceCPULimitViolated
|
||||
- NamespaceMemoryLimitViolated
|
||||
- NamespacePodCountViolated
|
||||
- NamespaceEphemeralStorageLimitViolated
|
||||
- NamespaceCPURequestViolated
|
||||
- NamespaceMemoryRequestViolated
|
||||
- NamespaceEphemeralStorageRequestViolated
|
||||
- SliceCPULimitViolated
|
||||
- SliceMemoryLimitViolated
|
||||
- SlicePodCountViolated
|
||||
- SliceEphemeralStorageLimitViolated
|
||||
- SliceCPURequestViolated
|
||||
- SliceMemoryRequestViolated
|
||||
- SliceEphemeralStorageRequestViolated
|
||||
- WorkerSliceResourceQuotaCreationFailed
|
||||
- WorkerSliceResourceQuotaCreated
|
||||
- WorkerSliceResourceQuotaUpdateFailed
|
||||
- WorkerSliceResourceQuotaUpdated
|
||||
- WorkerSliceResourceQuotaDeletionFailed
|
||||
- WorkerSliceResourceQuotaDeleted
|
||||
- DetachClusterInititated
|
||||
- DetachClusterSucceeded
|
||||
- DetachClusterFailed
|
||||
- OffboardNamesapceInitiated
|
||||
- OffboardNamesapceSucceeded
|
||||
- OffboardNamesapceFailed
|
||||
- InactiveServiceAccountDeletionFailed
|
||||
- WorkerSliceGatewayCreated
|
||||
- ServiceExportConfigDeletionFailed
|
||||
- ReadWriteRoleCreated
|
||||
- DefaultRoleBindingCreated
|
||||
- DefaultRoleBindingDeleted
|
||||
- WorkerSliceGatewayRecreated
|
||||
- ClusterDeregistered
|
||||
- ReadOnlyRoleCreationFailed
|
||||
- ReadOnlyRoleUpdated
|
||||
- WorkerClusterRoleCreationFailed
|
||||
- DefaultRoleBindingCreationFailed
|
||||
- DefaultRoleBindingUpdated
|
||||
- InactiveRoleBindingDeleted
|
||||
- ServiceAccountDeleted
|
||||
- ProjectDeletionFailed
|
||||
- ClusterDeletionFailed
|
||||
- WorkerClusterRoleCreated
|
||||
- WorkerServiceImportRecreationFailed
|
||||
- WorkerSliceConfigCreationFailed
|
||||
- SliceGatewayJobCreated
|
||||
- WorkerServiceImportUpdateFailed
|
||||
- ServiceAccountCreationFailed
|
||||
- InactiveServiceAccountDeleted
|
||||
- WorkerServiceImportRecreated
|
||||
- ServiceAccountDeletionFailed
|
||||
- NamespaceCreated
|
||||
- ServiceAccountSecretCreated
|
||||
- DefaultRoleBindingUpdateFailed
|
||||
- WorkerServiceImportDeletedForcefully
|
||||
- WorkerServiceImportCreated
|
||||
- SliceQoSConfigDeleted
|
||||
- ReadWriteRoleCreationFailed
|
||||
- InactiveRoleBindingDeletionFailed
|
||||
- WorkerClusterRoleUpdated
|
||||
- WorkerSliceConfigUpdateFailed
|
||||
- WorkerSliceGatewayDeletionFailed
|
||||
- ClusterDeleted
|
||||
- ServiceExportConfigDeleted
|
||||
- SecretDeleted
|
||||
- ReadOnlyRoleUpdateFailed
|
||||
- WorkerServiceImportCreationFailed
|
||||
- WorkerSliceGatewayCreationFailed
|
||||
- SliceConfigDeletionFailed
|
||||
- WorkerSliceConfigDeletedForcefully
|
||||
- WorkerSliceConfigDeletionFailed
|
||||
- WorkerSliceGatewayDeleted
|
||||
- NamespaceDeleted
|
||||
- WorkerClusterRoleUpdateFailed
|
||||
- WorkerServiceImportDeletionFailed
|
||||
- ClusterInstallationFailed
|
||||
- WorkerSliceConfigUpdated
|
||||
- ClusterInstallationInProgress
|
||||
- ClusterDeregistrationInProgress
|
||||
- WorkerServiceImportDeleted
|
||||
- SliceConfigDeleted
|
||||
- SliceQoSConfigDeletionFailed
|
||||
- NamespaceDeletionFailed
|
||||
- WorkerSliceConfigRecreated
|
||||
- SliceGatewayJobCreationFailed
|
||||
- ClusterDeregisterFailed
|
||||
- SecretDeletionFailed
|
||||
- ReadWriteRoleUpdateFailed
|
||||
- WorkerSliceConfigRecreationFailed
|
||||
- ClusterInstallationPending
|
||||
- NamespaceCreationFailed
|
||||
- WorkerServiceImportUpdated
|
||||
- ReadWriteRoleUpdated
|
||||
- ServiceAccountCreated
|
||||
- ServiceAccountSecretCreationFailed
|
||||
- DefaultRoleBindingDeletionFailed
|
||||
- WorkerSliceConfigCreated
|
||||
- ProjectDeleted
|
||||
- ClusterDeregisterTimeout
|
||||
- ReadOnlyRoleCreated
|
||||
- WorkerSliceConfigDeleted
|
||||
- WorkerSliceGatewayDeletedForcefully
|
||||
- WorkerSliceGatewayRecreationFailed
|
||||
{{ else }}
|
||||
controller.yaml: |-
|
||||
disabledEvents:
|
||||
{{ end }}
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
name: event-schema
|
||||
name: kubeslice-controller-event-schema-conf
|
||||
namespace: kubeslice-controller
|
|
@ -12,6 +12,15 @@ rules:
|
|||
resources:
|
||||
- projects
|
||||
- clusters
|
||||
- verbs:
|
||||
- get
|
||||
- list
|
||||
apiGroups:
|
||||
- ""
|
||||
- events.k8s.io/v1
|
||||
resources:
|
||||
- secrets
|
||||
- events
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
|
@ -28,4 +37,31 @@ roleRef:
|
|||
name: kubeslice-api-gw
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubeslice-api-gw
|
||||
name: kubeslice-api-gw
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kubeslice-api-gw
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubeslice-api-gw
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubeslice-api-gw
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubeslice-api-gw
|
||||
namespace: kubeslice-controller
|
||||
|
|
File diff suppressed because it is too large.
@@ -11,6 +11,9 @@ spec:
       targetPort: 443
       protocol: TCP
       name: http
+      {{- if eq .Values.kubeslice.uiproxy.service.type "NodePort" }}
+      nodePort: {{ .Values.kubeslice.uiproxy.service.nodePort }}
+      {{ end }}
   selector:
     app: kubeslice-ui-proxy
   type: {{ .Values.kubeslice.uiproxy.service.type }}
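A brief usage sketch for the new conditional: setting the UI-proxy service type to NodePort in an override file makes the template emit the fixed port (the port number below is arbitrary, and the values.schema.json added in this change constrains `type` to LoadBalancer, NodePort, or ClusterIP):

```yaml
# values-override.yaml (hypothetical): pin the UI proxy to a NodePort.
kubeslice:
  uiproxy:
    service:
      type: NodePort
      nodePort: 30443   # arbitrary example port in the default NodePort range
```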
@ -0,0 +1,43 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: kubeslice-ui-v2
|
||||
name: kubeslice-ui-v2
|
||||
namespace: kubeslice-controller
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: kubeslice-ui-v2
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kubeslice-ui-v2
|
||||
namespace: kubeslice-controller
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: kubeslice-ui-v2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: kubeslice-ui-v2
|
||||
spec:
|
||||
containers:
|
||||
- image: '{{ .Values.kubeslice.uiv2.image }}:{{ .Values.kubeslice.uiv2.tag }}'
|
||||
imagePullPolicy: '{{ .Values.kubeslice.uiv2.pullPolicy }}'
|
||||
name: kubeslice-ui-v2
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
{{- if and .Values.imagePullSecrets .Values.imagePullSecrets.repository .Values.imagePullSecrets.username .Values.imagePullSecrets.password }}
|
||||
imagePullSecrets:
|
||||
- name: kubeslice-ui-image-pull-secret
|
||||
{{- end }}
|
|
@ -0,0 +1,199 @@
|
|||
---
|
||||
{{ if .Values.kubeslice.prometheus.enabled}}
|
||||
apiVersion: v1
|
||||
data:
|
||||
prometheus.rules: |-
|
||||
groups:
|
||||
- name: Resource Quota violation alerts
|
||||
rules:
|
||||
- alert: Slice Resource Quota Violation
|
||||
expr: kubeslice_controller_slice_quota_violation > 0
|
||||
for: 1m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Slice Resource Usage Exceeded Quota"
|
||||
description: "The slice {{ "{{" }} $labels.slice_name}} has CPU usage {{ "{{" }} $labels.cpu}} and memory usage {{ "{{" }} $labels.memory}}: violated by {{ "{{" }} $labels.violated_resource_type}}"
|
||||
- alert: Cluster Resource Quota Violation
|
||||
expr: kubeslice_controller_cluster_quota_violation > 0
|
||||
for: 1m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Cluster Resource Usage Exceeded Quota"
|
||||
description: "The cluster {{ "{{" }} $labels.cluster_name}} has CPU usage {{ "{{" }} $labels.cpu}} and memory usage {{ "{{" }} $labels.memory}}: violated by {{ "{{" }} $labels.violated_resource_type}}"
|
||||
- alert: Namespace Resource Quota Violation
|
||||
expr: kubeslice_controller_namespace_quota_violation > 0
|
||||
for: 1m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: "Namespace Resource Usage Exceeded Quota"
|
||||
description: "The namespace {{ "{{" }}$labels.namespace}} has CPU usage {{ "{{" }}$labels.cpu}} and memory usage {{ "{{" }}$labels.memory}}: violated by {{ "{{" }}$labels.violated_resource_type}}"
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
evaluation_interval: 5s
|
||||
rule_files:
|
||||
- /etc/prometheus/prometheus.rules
|
||||
scrape_configs:
|
||||
- job_name: 'kubernetes-pods'
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: kubernetes_namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: kubernetes_pod_name
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
name: prometheus-server-conf
|
||||
name: kubeslice-controller-prometheus-server-conf
|
||||
namespace: kubeslice-controller
|
||||
{{ end }}
|
||||
|
||||
---
|
||||
{{ if .Values.kubeslice.prometheus.enabled}}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kubeslice-controller-prometheus-service
|
||||
namespace: kubeslice-controller
|
||||
spec:
|
||||
ports:
|
||||
- port: 9090
|
||||
targetPort: 9090
|
||||
selector:
|
||||
app: prometheus-server
|
||||
type: ClusterIP
|
||||
{{ end }}
|
||||
|
||||
---
|
||||
{{ if .Values.kubeslice.prometheus.enabled}}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: prometheus-server
|
||||
name: kubeslice-controller-prometheus
|
||||
namespace: kubeslice-controller
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: prometheus-server
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 0
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: prometheus-server
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --config.file=/etc/prometheus/prometheus.yml
|
||||
- --storage.tsdb.path=/prometheus/
|
||||
image: prom/prometheus
|
||||
name: prometheus
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
volumeMounts:
|
||||
- mountPath: /etc/prometheus/
|
||||
name: prometheus-config-volume
|
||||
- mountPath: /prometheus/
|
||||
name: prometheus-storage-volume
|
||||
volumes:
|
||||
- configMap:
|
||||
defaultMode: 420
|
||||
name: kubeslice-controller-prometheus-server-conf
|
||||
name: prometheus-config-volume
|
||||
- emptyDir: {}
|
||||
name: prometheus-storage-volume
|
||||
{{ end }}
|
||||
|
||||
---
{{ if .Values.kubeslice.prometheus.enabled}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubeslice-controller-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeslice-controller-prometheus
subjects:
- kind: ServiceAccount
name: default
namespace: kubeslice-controller
{{ end }}

---
{{ if .Values.kubeslice.prometheus.enabled}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kubeslice-controller-prometheus
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- nonResourceURLs:
- /metrics
verbs:
- get
{{ end }}
---
{{ if .Values.kubeslice.prometheus.enabled}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/version: v1.8.0
name: kubeslice-controller-kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeslice-controller-kube-state-metrics
subjects:
- kind: ServiceAccount
name: kube-state-metrics
namespace: kube-system
{{ end }}
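The scrape job in the ConfigMap above discovers targets through pod annotations: `prometheus.io/scrape` keeps a pod as a target, `prometheus.io/path` overrides `__metrics_path__`, and `prometheus.io/port` is joined with the pod address. A minimal sketch of pod metadata that this configuration would scrape; the pod name, image and port are illustrative assumptions, not values from the chart:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: metrics-demo                 # hypothetical pod, for illustration only
  namespace: kubeslice-controller
  annotations:
    prometheus.io/scrape: "true"     # matched by the 'keep' relabel rule
    prometheus.io/path: "/metrics"   # copied into __metrics_path__
    prometheus.io/port: "8080"       # rewritten into __address__ as <pod-ip>:8080
spec:
  containers:
    - name: app
      image: example.org/metrics-demo:latest   # placeholder image
      ports:
        - containerPort: 8080
```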
@ -0,0 +1,81 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"kubeslice": {
"type": "object",
"properties": {
"ui": {
"type": "object",
"properties": {
"image": {"type": "string"},
"tag": {"type": "string"},
"pullPolicy": {"type": "string"}
}
},
"uiv2": {
"type": "object",
"properties": {
"image": {"type": "string"},
"tag": {"type": "string"},
"pullPolicy": {"type": "string"}
}
},
"dashboard": {
"type": "object",
"properties": {
"image": {"type": "string"},
"tag": {"type": "string"},
"pullPolicy": {"type": "string"}
}
},
"uiproxy": {
"type": "object",
"properties": {
"image": {"type": "string"},
"tag": {"type": "string"},
"pullPolicy": {"type": "string"},
"service": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": ["LoadBalancer", "NodePort", "ClusterIP"],
"default": "LoadBalancer"
},
"nodePort": {
"type": "integer"
}
},
"required": ["type"]
}
}
},
"apigw": {
"type": "object",
"properties": {
"image": {"type": "string"},
"tag": {"type": "string"},
"pullPolicy": {"type": "string"}
}
},
"prometheus": {
"type": "object",
"properties": {
"url": {"type": "string"}
}
}
}
},
"imagePullSecrets": {
"type": "object",
"properties": {
"repository": {"type": [ "string", "null" ]},
"username": {"type": [ "string", "null" ]},
"password": {"type": [ "string", "null" ]},
"email": {"type": [ "string", "null" ]}
}
}
},
"required": ["kubeslice", "imagePullSecrets"]
}
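This schema only validates the shape of the values it guards: `kubeslice` and `imagePullSecrets` are required at the top level, `uiproxy.service.type` is restricted to the three service types, and the registry credentials may be strings or null. A hedged sketch of a values override that the schema accepts; the node port, credentials and registry endpoint are placeholders, not chart defaults:

```yaml
kubeslice:
  uiproxy:
    service:
      type: NodePort        # must be LoadBalancer, NodePort or ClusterIP
      nodePort: 30080       # placeholder; only relevant when type is NodePort
  prometheus:
    url: http://kubeslice-controller-prometheus-service:9090
imagePullSecrets:
  repository: https://index.docker.io/v1/   # placeholder registry endpoint
  username: docker-username                 # placeholder credentials
  password: docker-password
  email: user@example.com
```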
@ -9,7 +9,7 @@ kubeslice:
|
|||
projectnsPrefix: kubeslice
|
||||
endpoint:
|
||||
image: aveshasystems/kubeslice-controller-ent
|
||||
tag: 0.5.0
|
||||
tag: 0.10.0
|
||||
pullPolicy: IfNotPresent
|
||||
ovpnJob:
|
||||
image: aveshasystems/gateway-certs-generator
|
||||
|
@ -17,11 +17,17 @@ kubeslice:
|
|||
prometheus:
|
||||
enabled: true
|
||||
url: http://kubeslice-controller-prometheus-service:9090
|
||||
events:
|
||||
disabled: false
|
||||
|
||||
# Kubeslice UI settings
|
||||
ui:
|
||||
image: aveshasystems/kubeslice-ui-ent
|
||||
tag: 0.5.0
|
||||
tag: 0.10.0
|
||||
pullPolicy: IfNotPresent
|
||||
uiv2:
|
||||
image: aveshasystems/kubeslice-ui-v2-ent
|
||||
tag: 0.2.0
|
||||
pullPolicy: IfNotPresent
|
||||
dashboard:
|
||||
image: aveshasystems/kubeslice-kubernetes-dashboard
|
||||
|
@ -29,16 +35,18 @@ kubeslice:
|
|||
pullPolicy: IfNotPresent
|
||||
uiproxy:
|
||||
image: aveshasystems/kubeslice-ui-proxy
|
||||
tag: 1.0.4
|
||||
tag: 1.1.0
|
||||
pullPolicy: IfNotPresent
|
||||
service:
|
||||
## For kind, set this to NodePort, elsewhere use LoadBalancer or NodePort
|
||||
## Ref: https://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
|
||||
##
|
||||
type: LoadBalancer
|
||||
## if type selected to NodePort then set nodePort value if required
|
||||
# nodePort:
|
||||
apigw:
|
||||
image: aveshasystems/kubeslice-api-gw-ent
|
||||
tag: 1.3.5
|
||||
tag: 1.7.1
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# username, password & email values for imagePullSecrets have to be provided to create a secret
|
||||
|
|
|
@ -5,7 +5,7 @@ annotations:
|
|||
catalog.cattle.io/namespace: kubeslice-system
|
||||
catalog.cattle.io/release-name: kubeslice-worker
|
||||
apiVersion: v2
|
||||
appVersion: 0.5.0
|
||||
appVersion: 0.10.0
|
||||
description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking
|
||||
tool for efficient, secure, policy-enforced connectivity and true multi-tenancy
|
||||
capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure
|
||||
|
@ -36,4 +36,4 @@ keywords:
|
|||
kubeVersion: '>= 1.19.0-0'
|
||||
name: kubeslice-worker
|
||||
type: application
|
||||
version: 0.5.0
|
||||
version: 0.10.0
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
## Prerequisites
|
||||
- KubeSlice Controller needs to be installed
|
||||
- Create and configure the worker cluster following instructions in prerequisites and "registering the worker cluster" sections [documentation](https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher)
|
||||
- Create and configure the worker cluster following instructions in prerequisites and "registering the worker cluster" sections [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher)
|
||||
- Copy the chart version from the upper right hand section of this page [VERSION parameter needed during install and upgrade]
|
||||
- Click on the download link from the upper right hand section of this page and save it to a location accessible from the command prompt <LOCATION OF DOWNLOADED CHART.tgz>
|
||||
- Untar the chart to get the values.yaml file and edit the following fields
|
||||
|
@ -34,7 +34,7 @@ helm upgrade --history-max=5 --namespace=kubeslice-system kubeslice-worker kubes
|
|||
```
|
||||
|
||||
### Uninstall Kubeslice Worker
|
||||
- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/0.5.0/getting-started-with-cloud-clusters/uninstalling-kubeslice/deregistering-the-worker-cluster)
|
||||
- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/getting-started-with-cloud-clusters/uninstalling-kubeslice/deregistering-the-worker-cluster)
|
||||
|
||||
```console
|
||||
export KUBECONFIG=<WORKER CLUSTER KUBECONFIG>
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
apiVersion: v1
|
||||
appVersion: 0.2.1
|
||||
appVersion: 0.2.0
|
||||
description: A Helm chart for Kubernetes
|
||||
name: admission-webhook
|
||||
version: 0.2.1
|
||||
version: 0.2.0
|
||||
|
|
|
@ -3,11 +3,11 @@
|
|||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
webhookImageRegistry: aveshasystems/cmd-admission-webhook-k8s
|
||||
webhookImageTag: 1.5.3
|
||||
webhookImageRegistry: docker.io/aveshasystems/cmd-admission-webhook-k8s
|
||||
webhookImageTag: 1.6.1
|
||||
|
||||
nsmInjectContainerImageRegistry: aveshasystems/cmd-nsc
|
||||
nsmInjectContainerImageTag: 1.5.3
|
||||
nsmInjectContainerImageTag: 1.5.4
|
||||
|
||||
nsmInjectInitContainerImageRegistry: aveshasystems/cmd-nsc-init
|
||||
nsmInjectInitContainerImageTag: 1.5.3
|
||||
|
|
|
@ -5,6 +5,7 @@ metadata:
|
|||
name: nsm-config
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/resource-policy": keep
|
||||
"helm.sh/hook": pre-install
|
||||
"helm.sh/hook-weight": "1"
|
||||
data:
|
||||
excluded_prefixes_output.yaml: ''
|
|
@ -10,7 +10,7 @@ global:
|
|||
|
||||
forwardingPlane:
|
||||
kernelImageRegistry: docker.io/aveshasystems/cmd-forwarder-kernel
|
||||
kernelImageTag: 1.0.0
|
||||
kernelImageTag: 1.0.1
|
||||
|
||||
nsmgr:
|
||||
imageRegistry: docker.io/aveshasystems/cmd-nsmgr
|
||||
|
@ -21,4 +21,4 @@ nsmgr:
|
|||
excludePrefixesImageTag: 1.5.2
|
||||
|
||||
registryK8sImageRegistry: docker.io/aveshasystems/cmd-registry-k8s
|
||||
registryK8sImageTag: 1.5.2
|
||||
registryK8sImageTag: 1.5.4
|
||||
|
|
|
@ -34,6 +34,9 @@ spec:
|
|||
- jsonPath: .status.exportStatus
|
||||
name: Status
|
||||
type: string
|
||||
- jsonPath: .spec.aliases
|
||||
name: Alias
|
||||
type: string
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
@ -54,6 +57,12 @@ spec:
|
|||
spec:
|
||||
description: ServiceExportSpec defines the desired state of ServiceExport
|
||||
properties:
|
||||
aliases:
|
||||
description: Alias names for the exported service. The service could
|
||||
be addressed by the alias names in addition to the slice.local name.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
ingressEnabled:
|
||||
description: IngressEnabled denotes whether the traffic should be
|
||||
proxied through an ingress gateway
|
||||
|
@ -135,6 +144,12 @@ spec:
|
|||
status:
|
||||
description: ServiceExportStatus defines the observed state of ServiceExport
|
||||
properties:
|
||||
aliases:
|
||||
description: Alias names for the exported service. The service could
|
||||
be addressed by the alias names in addition to the slice.local name.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
availableEndpoints:
|
||||
description: AvailableEndpoints shows the number of available endpoints
|
||||
type: integer
|
||||
|
|
|
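The new `aliases` field added to both the spec and status above is a plain list of extra DNS names under which an exported service can be reached, in addition to its slice.local name. A hedged sketch of a ServiceExport using it; the metadata and alias values are made up, and the remaining spec fields are omitted:

```yaml
apiVersion: networking.kubeslice.io/v1beta1
kind: ServiceExport
metadata:
  name: demo-service          # hypothetical service export
  namespace: demo-namespace   # hypothetical namespace
spec:
  aliases:                    # extra names the service can be addressed by
    - demo-service.example.com
    - demo-service-alt
```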
@ -31,6 +31,9 @@ spec:
|
|||
- jsonPath: .status.importStatus
|
||||
name: Status
|
||||
type: string
|
||||
- jsonPath: .spec.aliases
|
||||
name: Alias
|
||||
type: string
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
@ -51,6 +54,12 @@ spec:
|
|||
spec:
|
||||
description: ServiceImportSpec defines the desired state of ServiceImport
|
||||
properties:
|
||||
aliases:
|
||||
description: Alias names for the exported service. The service could
|
||||
be addressed by the alias names in addition to the slice.local name.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
dnsName:
|
||||
description: DNSName shows the FQDN to reach the service
|
||||
type: string
|
||||
|
|
|
@ -14,6 +14,7 @@ spec:
|
|||
listKind: SliceGatewayList
|
||||
plural: slicegateways
|
||||
shortNames:
|
||||
- gw
|
||||
- slicegw
|
||||
singular: slicegateway
|
||||
scope: Namespaced
|
||||
|
@ -77,21 +78,27 @@ spec:
|
|||
sliceGatewayName:
|
||||
description: Slice Gateway Name
|
||||
type: string
|
||||
sliceGatewayNodePort:
|
||||
sliceGatewayNodePorts:
|
||||
description: Node port
|
||||
type: integer
|
||||
items:
|
||||
type: integer
|
||||
type: array
|
||||
sliceGatewayRemoteClusterId:
|
||||
description: Remote Cluster ID
|
||||
type: string
|
||||
sliceGatewayRemoteGatewayId:
|
||||
description: Remote Gateway ID
|
||||
type: string
|
||||
sliceGatewayRemoteNodeIp:
|
||||
description: Remote Node IP
|
||||
type: string
|
||||
sliceGatewayRemoteNodePort:
|
||||
sliceGatewayRemoteNodeIps:
|
||||
description: Remote Node IPs
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
sliceGatewayRemoteNodePorts:
|
||||
description: Remote Node Port
|
||||
type: integer
|
||||
items:
|
||||
type: integer
|
||||
type: array
|
||||
sliceGatewayRemoteSubnet:
|
||||
description: Remote Node Subnet
|
||||
type: string
|
||||
|
@ -121,21 +128,67 @@ spec:
|
|||
in pod
|
||||
format: int64
|
||||
type: integer
|
||||
localIp:
|
||||
description: LocalIP is the gateway tunnel ip
|
||||
type: string
|
||||
localNsmIp:
|
||||
description: LocalNsmIP is the IP on the nsm interface to Slice Router
|
||||
type: string
|
||||
gatewayPodStatus:
|
||||
description: gatewayPodStatus is a list that consists of status of
|
||||
individual gatewaypods
|
||||
items:
|
||||
properties:
|
||||
localNsmIP:
|
||||
type: string
|
||||
peerPodName:
|
||||
type: string
|
||||
podIP:
|
||||
type: string
|
||||
podName:
|
||||
type: string
|
||||
routeRemoved:
|
||||
format: int32
|
||||
type: integer
|
||||
tunnelStatus:
|
||||
properties:
|
||||
IntfName:
|
||||
type: string
|
||||
Latency:
|
||||
format: int64
|
||||
type: integer
|
||||
LocalIP:
|
||||
type: string
|
||||
PacketLoss:
|
||||
format: int64
|
||||
type: integer
|
||||
RemoteIP:
|
||||
type: string
|
||||
RxRate:
|
||||
format: int64
|
||||
type: integer
|
||||
Status:
|
||||
format: int32
|
||||
type: integer
|
||||
TxRate:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
peerIp:
|
||||
description: PeerIP is the gateway tunnel peer ip
|
||||
type: string
|
||||
podIp:
|
||||
description: PodIP is the Ip of the gateway pod running in cluster
|
||||
type: string
|
||||
podIps:
|
||||
description: PodIPs is the list of Ip of the gateway pods running
|
||||
in cluster
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
podName:
|
||||
description: PodName is the name of the gateway pod running in cluster
|
||||
description: Deprecated PodName is the name of the gateway pod running
|
||||
in cluster
|
||||
type: string
|
||||
podNames:
|
||||
description: PodNames is the list of names of the gateway pods running
|
||||
in cluster
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
podStatus:
|
||||
description: PodStatus shows whether gateway pod is healthy
|
||||
type: string
|
||||
|
|
|
@ -17,7 +17,7 @@ questions:
|
|||
variable: imagePullSecrets.password
|
||||
-
|
||||
default: ""
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
group: "Worker Secrets from Controller"
|
||||
label: "Controller Namespace"
|
||||
required: true
|
||||
|
@ -25,7 +25,7 @@ questions:
|
|||
variable: controllerSecret.namespace
|
||||
-
|
||||
default: ""
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
group: "Worker Secrets from Controller"
|
||||
label: "Controller Endpoint"
|
||||
required: true
|
||||
|
@ -33,7 +33,7 @@ questions:
|
|||
variable: controllerSecret.endpoint
|
||||
-
|
||||
default: ""
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
group: "Worker Secrets from Controller"
|
||||
label: "Controller CA Cert"
|
||||
required: true
|
||||
|
@ -41,7 +41,7 @@ questions:
|
|||
variable: controllerSecret.'ca.crt'
|
||||
-
|
||||
default: ""
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.5.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
|
||||
group: "Worker Secrets from Controller"
|
||||
label: "Controller Token"
|
||||
required: true
|
||||
|
@ -57,7 +57,7 @@ questions:
|
|||
variable: cluster.name
|
||||
-
|
||||
default: ""
|
||||
description: "Worker Cluster Endpoint,use 'kubectl cluster-info on worker cluster' or for details please follow https://docs.avesha.io/documentation/enterprise/0.5.0/"
|
||||
description: "Worker Cluster Endpoint,use 'kubectl cluster-info on worker cluster' or for details please follow https://docs.avesha.io/documentation/enterprise/0.10.0/"
|
||||
group: "Worker Cluster Details"
|
||||
label: "Cluster Endpoint"
|
||||
required: true
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kubeslice-delete-webhooks
|
||||
name: kubeslice-postdelete-job
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
|
@ -10,7 +10,7 @@ metadata:
|
|||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote}}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete,pre-rollback
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
"helm.sh/hook-weight": "1"
|
||||
|
||||
|
@ -18,9 +18,9 @@ metadata:
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubeslice-delete-webhooks
|
||||
name: kubeslice-postdelete-job
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete,pre-rollback
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
"helm.sh/hook-weight": "-1"
|
||||
labels:
|
||||
|
@ -30,19 +30,19 @@ metadata:
|
|||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubeslice-delete-webhooks
|
||||
name: kubeslice-postdelete-job
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubeslice-delete-webhooks
|
||||
name: kubeslice-postdelete-job
|
||||
namespace: {{ .Release.Namespace }}
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kubeslice-delete-webhooks
|
||||
name: kubeslice-postdelete-job
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete,pre-rollback
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
"helm.sh/hook-weight": "-1"
|
||||
labels:
|
||||
|
@ -54,6 +54,7 @@ rules:
|
|||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
|
@ -68,7 +69,7 @@ rules:
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeslice-delete-webhooks
|
||||
name: kubeslice-postdelete-job
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
|
@ -76,36 +77,36 @@ metadata:
|
|||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete,pre-rollback
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
"helm.sh/hook-weight": "1"
|
||||
data:
delete-admission-webhook.sh: |-
kubeslice-cleanup.sh: |-
#!/usr/bin/env bash
NAMESPACE={{ .Release.Namespace | quote}}
echo "finding and removing spiffeids in namespace $NAMESPACE ..."
for item in $(kubectl get spiffeid.spiffeid.spiffe.io -n $NAMESPACE -o name); do
echo "removing item $item"
kubectl patch $item -p '{"metadata":{"finalizers":null}}' --type=merge -n $NAMESPACE
kubectl delete $item --ignore-not-found -n $NAMESPACE
done
# TODO: once we figure out how to keep spire in release ns then we could remove this
NAMESPACE="spire"
echo "finding and removing spiffeids in namespace $NAMESPACE ..."
for item in $(kubectl get spiffeid.spiffeid.spiffe.io -n $NAMESPACE -o name); do
echo "removing item $item"
kubectl patch $item -p '{"metadata":{"finalizers":null}}' --type=merge -n $NAMESPACE
kubectl delete $item --ignore-not-found -n $NAMESPACE
NAMESPACES=(spire kubeslice-system)
for ns in ${NAMESPACES[@]}
do
kubectl get ns $ns -o name
if [[ $? -eq 1 ]]; then
echo "$ns namespace was deleted successfully"
continue
fi
echo "finding and removing spiffeids in namespace $ns ..."
for item in $(kubectl get spiffeid.spiffeid.spiffe.io -n $ns -o name); do
echo "removing item $item"
kubectl patch $item -p '{"metadata":{"finalizers":null}}' --type=merge -n $ns
kubectl delete $item --ignore-not-found -n $ns
done
done

---
apiVersion: batch/v1
kind: Job
metadata:
name: kubeslice-delete-webhooks
name: kubeslice-postdelete-job
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-delete,pre-rollback
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": before-hook-creation
"helm.sh/hook-weight": "2"
labels:
@ -116,21 +117,21 @@ spec:
backoffLimit: 3
template:
metadata:
name: kubeslice-delete-webhooks
name: kubeslice-postdelete-job
namespace: {{ .Release.Namespace }}
spec:
serviceAccountName: kubeslice-delete-webhooks
serviceAccountName: kubeslice-postdelete-job
containers:
- name: kubectl
image: "alpine/k8s:1.22.9"
command:
- /bin/bash
- /tmp/delete-admission-webhook.sh
- /tmp/kubeslice-cleanup.sh
volumeMounts:
- mountPath: /tmp
name: kubeslice-delete-webhooks
name: kubeslice-cleanup
volumes:
- name: kubeslice-delete-webhooks
- name: kubeslice-cleanup
configMap:
name: kubeslice-delete-webhooks
restartPolicy: OnFailure
name: kubeslice-postdelete-job
restartPolicy: OnFailure
@ -83,8 +83,6 @@ spec:
|
|||
value: "{{ .Values.routerSidecar.image }}:{{ .Values.routerSidecar.tag }}"
|
||||
- name: AVESHA_VL3_SIDECAR_IMAGE_PULLPOLICY
|
||||
value: {{ .Values.routerSidecar.pullPolicy }}
|
||||
- name: NODE_IP
|
||||
value: "{{ .Values.cluster.nodeIp }}"
|
||||
- name: CLUSTER_ENDPOINT
|
||||
value: "{{ .Values.cluster.endpoint }}"
|
||||
- name: AVESHA_GW_SIDECAR_IMAGE
|
||||
|
@ -128,6 +126,8 @@ spec:
|
|||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
volumeMounts:
|
||||
- name: kubeslice-worker-event-schema-conf
|
||||
mountPath: /events/event-schema/
|
||||
- mountPath: /var/run/secrets/kubernetes.io/hub-serviceaccount
|
||||
name: hub-secret
|
||||
readOnly: true
|
||||
|
@ -137,6 +137,10 @@ spec:
|
|||
serviceAccountName: kubeslice-controller-manager
|
||||
terminationGracePeriodSeconds: 10
|
||||
volumes:
|
||||
- name: kubeslice-worker-event-schema-conf
|
||||
configMap:
|
||||
name: kubeslice-worker-event-schema-conf
|
||||
defaultMode: 420
|
||||
- name: webhook-certs
|
||||
secret:
|
||||
secretName: kubeslice-admission-webhook-certs
|
||||
|
@ -153,3 +157,31 @@ spec:
|
|||
imagePullSecrets:
|
||||
- name: kubeslice-image-pull-secret
|
||||
{{- end }}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubeslice.io/node-type
|
||||
operator: In
|
||||
values:
|
||||
- gateway
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
{{ if .Values.events.disabled }}
|
||||
worker.yaml: |-
|
||||
disabledEvents:
|
||||
- LicenseSecretNotFound
|
||||
{{ else }}
|
||||
worker.yaml: |-
|
||||
disabledEvents:
|
||||
- LicenseSecretNotFound
|
||||
{{ end }}
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
name: event-schema
|
||||
name: kubeslice-worker-event-schema-conf
|
||||
namespace: kubeslice-system
|
||||
|
|
|
@ -635,6 +635,9 @@ data:
|
|||
- jsonPath: .status.exportStatus
|
||||
name: Status
|
||||
type: string
|
||||
- jsonPath: .spec.aliases
|
||||
name: Alias
|
||||
type: string
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
@ -655,6 +658,12 @@ data:
|
|||
spec:
|
||||
description: ServiceExportSpec defines the desired state of ServiceExport
|
||||
properties:
|
||||
aliases:
|
||||
description: Alias names for the exported service. The service could
|
||||
be addressed by the alias names in addition to the slice.local name.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
ingressEnabled:
|
||||
description: IngressEnabled denotes whether the traffic should be
|
||||
proxied through an ingress gateway
|
||||
|
@ -736,6 +745,12 @@ data:
|
|||
status:
|
||||
description: ServiceExportStatus defines the observed state of ServiceExport
|
||||
properties:
|
||||
aliases:
|
||||
description: Alias names for the exported service. The service could
|
||||
be addressed by the alias names in addition to the slice.local name.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
availableEndpoints:
|
||||
description: AvailableEndpoints shows the number of available endpoints
|
||||
type: integer
|
||||
|
@ -838,6 +853,9 @@ data:
|
|||
- jsonPath: .status.importStatus
|
||||
name: Status
|
||||
type: string
|
||||
- jsonPath: .spec.aliases
|
||||
name: Alias
|
||||
type: string
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
@ -858,6 +876,12 @@ data:
|
|||
spec:
|
||||
description: ServiceImportSpec defines the desired state of ServiceImport
|
||||
properties:
|
||||
aliases:
|
||||
description: Alias names for the exported service. The service could
|
||||
be addressed by the alias names in addition to the slice.local name.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
dnsName:
|
||||
description: DNSName shows the FQDN to reach the service
|
||||
type: string
|
||||
|
@ -952,7 +976,6 @@ data:
|
|||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
networking.kubeslice.io_slicegateways.yaml: |2
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
|
@ -969,6 +992,7 @@ data:
|
|||
listKind: SliceGatewayList
|
||||
plural: slicegateways
|
||||
shortNames:
|
||||
- gw
|
||||
- slicegw
|
||||
singular: slicegateway
|
||||
scope: Namespaced
|
||||
|
@ -1032,21 +1056,27 @@ data:
|
|||
sliceGatewayName:
|
||||
description: Slice Gateway Name
|
||||
type: string
|
||||
sliceGatewayNodePort:
|
||||
sliceGatewayNodePorts:
|
||||
description: Node port
|
||||
type: integer
|
||||
items:
|
||||
type: integer
|
||||
type: array
|
||||
sliceGatewayRemoteClusterId:
|
||||
description: Remote Cluster ID
|
||||
type: string
|
||||
sliceGatewayRemoteGatewayId:
|
||||
description: Remote Gateway ID
|
||||
type: string
|
||||
sliceGatewayRemoteNodeIp:
|
||||
description: Remote Node IP
|
||||
type: string
|
||||
sliceGatewayRemoteNodePort:
|
||||
sliceGatewayRemoteNodeIps:
|
||||
description: Remote Node IPs
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
sliceGatewayRemoteNodePorts:
|
||||
description: Remote Node Port
|
||||
type: integer
|
||||
items:
|
||||
type: integer
|
||||
type: array
|
||||
sliceGatewayRemoteSubnet:
|
||||
description: Remote Node Subnet
|
||||
type: string
|
||||
|
@ -1076,21 +1106,67 @@ data:
|
|||
in pod
|
||||
format: int64
|
||||
type: integer
|
||||
localIp:
|
||||
description: LocalIP is the gateway tunnel ip
|
||||
type: string
|
||||
localNsmIp:
|
||||
description: LocalNsmIP is the IP on the nsm interface to Slice Router
|
||||
type: string
|
||||
gatewayPodStatus:
|
||||
description: gatewayPodStatus is a list that consists of status of
|
||||
individual gatewaypods
|
||||
items:
|
||||
properties:
|
||||
localNsmIP:
|
||||
type: string
|
||||
peerPodName:
|
||||
type: string
|
||||
podIP:
|
||||
type: string
|
||||
podName:
|
||||
type: string
|
||||
routeRemoved:
|
||||
format: int32
|
||||
type: integer
|
||||
tunnelStatus:
|
||||
properties:
|
||||
IntfName:
|
||||
type: string
|
||||
Latency:
|
||||
format: int64
|
||||
type: integer
|
||||
LocalIP:
|
||||
type: string
|
||||
PacketLoss:
|
||||
format: int64
|
||||
type: integer
|
||||
RemoteIP:
|
||||
type: string
|
||||
RxRate:
|
||||
format: int64
|
||||
type: integer
|
||||
Status:
|
||||
format: int32
|
||||
type: integer
|
||||
TxRate:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
peerIp:
|
||||
description: PeerIP is the gateway tunnel peer ip
|
||||
type: string
|
||||
podIp:
|
||||
description: PodIP is the Ip of the gateway pod running in cluster
|
||||
type: string
|
||||
podIps:
|
||||
description: PodIPs is the list of Ip of the gateway pods running
|
||||
in cluster
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
podName:
|
||||
description: PodName is the name of the gateway pod running in cluster
|
||||
description: Deprecated PodName is the name of the gateway pod running
|
||||
in cluster
|
||||
type: string
|
||||
podNames:
|
||||
description: PodNames is the list of names of the gateway pods running
|
||||
in cluster
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
podStatus:
|
||||
description: PodStatus shows whether gateway pod is healthy
|
||||
type: string
|
||||
|
@ -1106,7 +1182,6 @@ data:
|
|||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
networking.kubeslice.io_slices.yaml: |2-
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
operator:
|
||||
image: docker.io/aveshasystems/worker-operator-ent
|
||||
tag: 0.5.0
|
||||
image: aveshasystems/worker-operator-ent
|
||||
tag: 0.10.0
|
||||
pullPolicy: IfNotPresent
|
||||
logLevel: INFO
|
||||
|
||||
|
@ -13,28 +13,28 @@ controllerSecret:
|
|||
|
||||
cluster:
|
||||
name:
|
||||
nodeIp:
|
||||
nodeIp: # This field is deprecated. You can set nodeIPs through cluster spec.
|
||||
endpoint:
|
||||
|
||||
router:
|
||||
image: docker.io/aveshasystems/cmd-nse-vl3
|
||||
tag: 1.0.0
|
||||
tag: 1.0.2
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
routerSidecar:
|
||||
image: docker.io/aveshasystems/kubeslice-router-sidecar
|
||||
tag: 0.3.1
|
||||
tag: 1.4.1
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
netop:
|
||||
networkInterface: eth0
|
||||
image: docker.io/aveshasystems/netops
|
||||
tag: 0.1.1
|
||||
tag: 0.2.0
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
gateway:
|
||||
image: docker.io/aveshasystems/gw-sidecar
|
||||
tag: 0.1.4
|
||||
tag: 0.3.0
|
||||
pullPolicy: IfNotPresent
|
||||
logLevel: INFO
|
||||
|
||||
|
@ -45,16 +45,19 @@ openvpn:
|
|||
pullPolicy: IfNotPresent
|
||||
client:
|
||||
image: docker.io/aveshasystems/openvpn-client.alpine.amd64
|
||||
tag: 1.0.1
|
||||
tag: 1.0.0
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
dns:
|
||||
image: docker.io/aveshasystems/dns
|
||||
tag: 0.0.3
|
||||
tag: 0.1.0
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
jaeger:
|
||||
enabled: false
|
||||
|
||||
events:
|
||||
disabled: false
|
||||
|
||||
metrics:
|
||||
insecure: false
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
dependencies:
|
||||
- name: redis
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
version: 17.10.1
|
||||
version: 17.10.3
|
||||
- name: postgresql
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
version: 12.4.2
|
||||
version: 12.5.1
|
||||
- name: common
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
version: 2.2.5
|
||||
digest: sha256:2e66cac7c89a1c48c71aad455cc484c37c6eb7793c03135c7f6105b0f3bf10b8
|
||||
generated: "2023-05-03T15:53:54.20666706Z"
|
||||
version: 2.3.0
|
||||
digest: sha256:9299cf5d7e82e4ea6c33e1ecc32eb02ae581664403edcb8630cc270e8fa8ba8e
|
||||
generated: "2023-05-15T17:12:01.855038165Z"
|
||||
|
|
|
@ -24,7 +24,7 @@ dependencies:
|
|||
description: Apache Airflow is a tool to express and execute workflows as directed
|
||||
acyclic graphs (DAGs). It includes utilities to schedule tasks, monitor task progress
|
||||
and handle task dependencies.
|
||||
home: https://github.com/bitnami/charts/tree/main/bitnami/airflow
|
||||
home: https://bitnami.com
|
||||
icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/airflow-1.svg
|
||||
keywords:
|
||||
- apache
|
||||
|
@ -32,10 +32,9 @@ keywords:
|
|||
- workflow
|
||||
- dag
|
||||
maintainers:
|
||||
- name: Bitnami
|
||||
- name: VMware, Inc.
|
||||
url: https://github.com/bitnami/charts
|
||||
name: airflow
|
||||
sources:
|
||||
- https://github.com/bitnami/containers/tree/main/bitnami/airflow
|
||||
- https://airflow.apache.org/
|
||||
version: 14.1.3
|
||||
- https://github.com/bitnami/charts/tree/main/bitnami/airflow
|
||||
version: 14.2.1
|
||||
|
|
|
@ -88,7 +88,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `dags.existingConfigmap` | Name of an existing ConfigMap with all the DAGs files you want to load in Airflow | `""` |
|
||||
| `dags.image.registry` | Init container load-dags image registry | `docker.io` |
|
||||
| `dags.image.repository` | Init container load-dags image repository | `bitnami/bitnami-shell` |
|
||||
| `dags.image.tag` | Init container load-dags image tag (immutable tags are recommended) | `11-debian-11-r113` |
|
||||
| `dags.image.tag` | Init container load-dags image tag (immutable tags are recommended) | `11-debian-11-r116` |
|
||||
| `dags.image.digest` | Init container load-dags image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `dags.image.pullPolicy` | Init container load-dags image pull policy | `IfNotPresent` |
|
||||
| `dags.image.pullSecrets` | Init container load-dags image pull secrets | `[]` |
|
||||
|
@ -107,7 +107,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | -------------------- |
|
||||
| `web.image.registry` | Airflow image registry | `docker.io` |
|
||||
| `web.image.repository` | Airflow image repository | `bitnami/airflow` |
|
||||
| `web.image.tag` | Airflow image tag (immutable tags are recommended) | `2.6.0-debian-11-r2` |
|
||||
| `web.image.tag` | Airflow image tag (immutable tags are recommended) | `2.6.0-debian-11-r5` |
|
||||
| `web.image.digest` | Airflow image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `web.image.pullPolicy` | Airflow image pull policy | `IfNotPresent` |
|
||||
| `web.image.pullSecrets` | Airflow image pull secrets | `[]` |
|
||||
|
@ -182,7 +182,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------------- |
|
||||
| `scheduler.image.registry` | Airflow Scheduler image registry | `docker.io` |
|
||||
| `scheduler.image.repository` | Airflow Scheduler image repository | `bitnami/airflow-scheduler` |
|
||||
| `scheduler.image.tag` | Airflow Scheduler image tag (immutable tags are recommended) | `2.6.0-debian-11-r1` |
|
||||
| `scheduler.image.tag` | Airflow Scheduler image tag (immutable tags are recommended) | `2.6.0-debian-11-r3` |
|
||||
| `scheduler.image.digest` | Airflow Schefuler image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `scheduler.image.pullPolicy` | Airflow Scheduler image pull policy | `IfNotPresent` |
|
||||
| `scheduler.image.pullSecrets` | Airflow Scheduler image pull secrets | `[]` |
|
||||
|
@ -236,7 +236,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------------ |
|
||||
| `worker.image.registry` | Airflow Worker image registry | `docker.io` |
|
||||
| `worker.image.repository` | Airflow Worker image repository | `bitnami/airflow-worker` |
|
||||
| `worker.image.tag` | Airflow Worker image tag (immutable tags are recommended) | `2.6.0-debian-11-r1` |
|
||||
| `worker.image.tag` | Airflow Worker image tag (immutable tags are recommended) | `2.6.0-debian-11-r3` |
|
||||
| `worker.image.digest` | Airflow Worker image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `worker.image.pullPolicy` | Airflow Worker image pull policy | `IfNotPresent` |
|
||||
| `worker.image.pullSecrets` | Airflow Worker image pull secrets | `[]` |
|
||||
|
@ -316,7 +316,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| ------------------------------ | --------------------------------------------------------------------------------------------------- | --------------------- |
|
||||
| `git.image.registry` | Git image registry | `docker.io` |
|
||||
| `git.image.repository` | Git image repository | `bitnami/git` |
|
||||
| `git.image.tag` | Git image tag (immutable tags are recommended) | `2.40.1-debian-11-r3` |
|
||||
| `git.image.tag` | Git image tag (immutable tags are recommended) | `2.40.1-debian-11-r6` |
|
||||
| `git.image.digest` | Git image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `git.image.pullPolicy` | Git image pull policy | `IfNotPresent` |
|
||||
| `git.image.pullSecrets` | Git image pull secrets | `[]` |
|
||||
|
@ -408,7 +408,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `metrics.enabled` | Whether or not to create a standalone Airflow exporter to expose Airflow metrics | `false` |
|
||||
| `metrics.image.registry` | Airflow exporter image registry | `docker.io` |
|
||||
| `metrics.image.repository` | Airflow exporter image repository | `bitnami/airflow-exporter` |
|
||||
| `metrics.image.tag` | Airflow exporter image tag (immutable tags are recommended) | `0.20220314.0-debian-11-r117` |
|
||||
| `metrics.image.tag` | Airflow exporter image tag (immutable tags are recommended) | `0.20220314.0-debian-11-r120` |
|
||||
| `metrics.image.digest` | Airflow exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `metrics.image.pullPolicy` | Airflow exporter image pull policy | `IfNotPresent` |
|
||||
| `metrics.image.pullSecrets` | Airflow exporter image pull secrets | `[]` |
|
||||
|
|
|
@ -2,10 +2,10 @@ annotations:
|
|||
category: Infrastructure
|
||||
licenses: Apache-2.0
|
||||
apiVersion: v2
|
||||
appVersion: 2.2.5
|
||||
appVersion: 2.3.0
|
||||
description: A Library Helm Chart for grouping common logic between bitnami charts.
|
||||
This chart is not deployable by itself.
|
||||
home: https://github.com/bitnami/charts/tree/main/bitnami/common
|
||||
home: https://bitnami.com
|
||||
icon: https://bitnami.com/downloads/logos/bitnami-mark.png
|
||||
keywords:
|
||||
- common
|
||||
|
@ -14,11 +14,10 @@ keywords:
|
|||
- function
|
||||
- bitnami
|
||||
maintainers:
|
||||
- name: Bitnami
|
||||
- name: VMware, Inc.
|
||||
url: https://github.com/bitnami/charts
|
||||
name: common
|
||||
sources:
|
||||
- https://github.com/bitnami/charts
|
||||
- https://www.bitnami.com/
|
||||
type: library
|
||||
version: 2.2.5
|
||||
version: 2.3.0
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
|
||||
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
|
||||
|
||||
Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
|
||||
|
||||
## TL;DR
|
||||
|
||||
```yaml
|
||||
|
|
|
@ -45,7 +45,7 @@ Return the proper Docker Image Registry Secret Names (deprecated: use common.ima
|
|||
|
||||
{{- if (not (empty $pullSecrets)) }}
|
||||
imagePullSecrets:
|
||||
{{- range $pullSecrets }}
|
||||
{{- range $pullSecrets | uniq }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -73,7 +73,7 @@ Return the proper Docker Image Registry Secret Names evaluating values as templa
|
|||
|
||||
{{- if (not (empty $pullSecrets)) }}
|
||||
imagePullSecrets:
|
||||
{{- range $pullSecrets }}
|
||||
{{- range $pullSecrets | uniq }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
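The only functional change in these two helpers is the added `| uniq` filter, so a secret name collected from both the global values and an image-level `pullSecrets` list is now rendered once instead of twice. A sketch of the effect, assuming a secret called `my-registry-secret` is listed in both places (the name is an illustrative assumption):

```yaml
# values passed to the helper (sketch)
global:
  imagePullSecrets:
    - my-registry-secret
image:
  pullSecrets:
    - my-registry-secret

# rendered output after this change: the duplicate entry collapses to one
imagePullSecrets:
  - name: my-registry-secret
```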
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
dependencies:
|
||||
- name: common
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
version: 2.2.4
|
||||
digest: sha256:829fc25cbbb396161e735c83d152d74a8b3a82d07f08866b885b812d30b920df
|
||||
generated: "2023-04-25T10:37:03.061527+02:00"
|
||||
version: 2.2.5
|
||||
digest: sha256:318f438acfeaced11d9060877d615caf1985417d2865810defaa886d3496f8d3
|
||||
generated: "2023-05-08T19:26:58.084687094Z"
|
||||
|
|
|
@ -2,7 +2,7 @@ annotations:
|
|||
category: Database
|
||||
licenses: Apache-2.0
|
||||
apiVersion: v2
|
||||
appVersion: 15.2.0
|
||||
appVersion: 15.3.0
|
||||
dependencies:
|
||||
- name: common
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
|
@ -12,7 +12,7 @@ dependencies:
|
|||
description: PostgreSQL (Postgres) is an open source object-relational database known
|
||||
for reliability and data integrity. ACID-compliant, it supports foreign keys, joins,
|
||||
views, triggers and stored procedures.
|
||||
home: https://github.com/bitnami/charts/tree/main/bitnami/postgresql
|
||||
home: https://bitnami.com
|
||||
icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png
|
||||
keywords:
|
||||
- postgresql
|
||||
|
@ -22,10 +22,9 @@ keywords:
|
|||
- replication
|
||||
- cluster
|
||||
maintainers:
|
||||
- name: Bitnami
|
||||
- name: VMware, Inc.
|
||||
url: https://github.com/bitnami/charts
|
||||
name: postgresql
|
||||
sources:
|
||||
- https://github.com/bitnami/containers/tree/main/bitnami/postgresql
|
||||
- https://www.postgresql.org/
|
||||
version: 12.4.2
|
||||
- https://github.com/bitnami/charts/tree/main/bitnami/postgresql
|
||||
version: 12.5.1
|
||||
|
|
|
@ -98,7 +98,7 @@ kubectl delete pvc -l release=my-release
|
|||
| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
|
||||
| `image.registry` | PostgreSQL image registry | `docker.io` |
|
||||
| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
|
||||
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.2.0-debian-11-r26` |
|
||||
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.3.0-debian-11-r0` |
|
||||
| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Specify image pull secrets | `[]` |
|
||||
|
@ -375,7 +375,7 @@ kubectl delete pvc -l release=my-release
|
|||
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
|
||||
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
|
||||
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
|
||||
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r109` |
|
||||
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r115` |
|
||||
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
|
||||
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
|
||||
|
@ -403,7 +403,7 @@ kubectl delete pvc -l release=my-release
|
|||
| `metrics.enabled` | Start a prometheus exporter | `false` |
|
||||
| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` |
|
||||
| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` |
|
||||
| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.12.0-debian-11-r80` |
|
||||
| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.12.0-debian-11-r86` |
|
||||
| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` |
|
||||
| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` |
|
||||
|
|
|
@ -2,7 +2,7 @@ annotations:
|
|||
category: Infrastructure
|
||||
licenses: Apache-2.0
|
||||
apiVersion: v2
|
||||
appVersion: 2.2.4
|
||||
appVersion: 2.2.5
|
||||
description: A Library Helm Chart for grouping common logic between bitnami charts.
|
||||
This chart is not deployable by itself.
|
||||
home: https://github.com/bitnami/charts/tree/main/bitnami/common
|
||||
|
@ -21,4 +21,4 @@ sources:
|
|||
- https://github.com/bitnami/charts
|
||||
- https://www.bitnami.com/
|
||||
type: library
|
||||
version: 2.2.4
|
||||
version: 2.2.5
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# Bitnami Common Library Chart
|
||||
|
||||
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
|
||||
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
|
||||
|
||||
## TL;DR
|
||||
|
||||
|
@ -8,7 +8,7 @@ A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for gro
|
|||
dependencies:
|
||||
- name: common
|
||||
version: 1.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
```
|
||||
|
||||
```console
|
||||
|
|
|
@ -95,7 +95,7 @@ diagnosticMode:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/postgresql
|
||||
tag: 15.2.0-debian-11-r26
|
||||
tag: 15.3.0-debian-11-r0
|
||||
digest: ""
|
||||
## Specify a imagePullPolicy
|
||||
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
|
||||
|
@ -1136,7 +1136,7 @@ volumePermissions:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/bitnami-shell
|
||||
tag: 11-debian-11-r109
|
||||
tag: 11-debian-11-r115
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
@ -1231,7 +1231,7 @@ metrics:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/postgres-exporter
|
||||
tag: 0.12.0-debian-11-r80
|
||||
tag: 0.12.0-debian-11-r86
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
dependencies:
|
||||
- name: common
|
||||
repository: oci://registry-1.docker.io/bitnamicharts
|
||||
version: 2.2.4
|
||||
digest: sha256:829fc25cbbb396161e735c83d152d74a8b3a82d07f08866b885b812d30b920df
|
||||
generated: "2023-04-20T09:36:23.406458+02:00"
|
||||
version: 2.2.5
|
||||
digest: sha256:318f438acfeaced11d9060877d615caf1985417d2865810defaa886d3496f8d3
|
||||
generated: "2023-05-09T00:40:30.988475033Z"
|
||||
|
|
|
@ -24,4 +24,4 @@ maintainers:
|
|||
name: redis
|
||||
sources:
|
||||
- https://github.com/bitnami/containers/tree/main/bitnami/redis
|
||||
version: 17.10.1
|
||||
version: 17.10.3
|
||||
|
|
|
@ -76,20 +76,22 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------------ | --------------------------------------------------------------------------------------- | --------------- |
|
||||
| `kubeVersion` | Override Kubernetes version | `""` |
|
||||
| `nameOverride` | String to partially override common.names.fullname | `""` |
|
||||
| `fullnameOverride` | String to fully override common.names.fullname | `""` |
|
||||
| `commonLabels` | Labels to add to all deployed objects | `{}` |
|
||||
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
|
||||
| `secretAnnotations` | Annotations to add to secret | `{}` |
|
||||
| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` |
|
||||
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
|
||||
| `useHostnames` | Use hostnames internally when announcing replication | `true` |
|
||||
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
|
||||
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
|
||||
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
|
||||
| Name | Description | Value |
|
||||
| ------------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- |
|
||||
| `kubeVersion` | Override Kubernetes version | `""` |
|
||||
| `nameOverride` | String to partially override common.names.fullname | `""` |
|
||||
| `fullnameOverride` | String to fully override common.names.fullname | `""` |
|
||||
| `commonLabels` | Labels to add to all deployed objects | `{}` |
|
||||
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
|
||||
| `secretAnnotations` | Annotations to add to secret | `{}` |
|
||||
| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` |
|
||||
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
|
||||
| `useHostnames` | Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address | `true` |
|
||||
| `nameResolutionThreshold` | Failure threshold for internal hostnames resolution | `5` |
|
||||
| `nameResolutionTimeout` | Timeout seconds between probes for internal hostnames resolution | `5` |
|
||||
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
|
||||
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
|
||||
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
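The three name-resolution parameters work together: with `useHostnames: false` the chart's scripts resolve each replica's hostname to an IP before announcing it, retrying up to `nameResolutionThreshold` times and sleeping `nameResolutionTimeout` seconds between attempts. A minimal sketch of the corresponding values override (the numbers are simply the chart defaults from the table above):

```yaml
useHostnames: false        # announce resolved IP addresses instead of hostnames
nameResolutionThreshold: 5 # lookups attempted before the script gives up
nameResolutionTimeout: 5   # seconds to wait between lookup attempts
```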
|
||||
|
||||
### Redis® Image parameters
|
||||
|
||||
|
@ -97,7 +99,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| ------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------- |
|
||||
| `image.registry` | Redis® image registry | `docker.io` |
|
||||
| `image.repository` | Redis® image repository | `bitnami/redis` |
|
||||
| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.11-debian-11-r0` |
|
||||
| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.11-debian-11-r7` |
|
||||
| `image.digest` | Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `image.pullPolicy` | Redis® image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Redis® image pull secrets | `[]` |
|
||||
|
@ -331,7 +333,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` |
|
||||
| `sentinel.image.registry` | Redis® Sentinel image registry | `docker.io` |
|
||||
| `sentinel.image.repository` | Redis® Sentinel image repository | `bitnami/redis-sentinel` |
|
||||
| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.10-debian-11-r8` |
|
||||
| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.11-debian-11-r6` |
|
||||
| `sentinel.image.digest` | Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `sentinel.image.pullPolicy` | Redis® Sentinel image pull policy | `IfNotPresent` |
|
||||
| `sentinel.image.pullSecrets` | Redis® Sentinel image pull secrets | `[]` |
|
||||
|
@ -449,7 +451,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` |
|
||||
| `metrics.image.registry` | Redis® Exporter image registry | `docker.io` |
|
||||
| `metrics.image.repository` | Redis® Exporter image repository | `bitnami/redis-exporter` |
|
||||
| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.50.0-debian-11-r2` |
|
||||
| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.50.0-debian-11-r9` |
|
||||
| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` |
|
||||
| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` |
|
||||
|
@ -514,7 +516,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
|
||||
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
|
||||
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
|
||||
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r107` |
|
||||
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r114` |
|
||||
| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
|
||||
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
|
||||
|
@ -524,7 +526,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |
|
||||
| `sysctl.image.registry` | Bitnami Shell image registry | `docker.io` |
|
||||
| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
|
||||
| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r107` |
|
||||
| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r114` |
|
||||
| `sysctl.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
|
||||
| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
|
||||
|
|
|
@ -2,7 +2,7 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.2.4
appVersion: 2.2.5
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://github.com/bitnami/charts/tree/main/bitnami/common
@ -21,4 +21,4 @@ sources:
- https://github.com/bitnami/charts
- https://www.bitnami.com/
type: library
version: 2.2.4
version: 2.2.5

@ -1,6 +1,6 @@
# Bitnami Common Library Chart

A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.

## TL;DR

@ -8,7 +8,7 @@ A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for gro
dependencies:
- name: common
version: 1.x.x
repository: https://charts.bitnami.com/bitnami
repository: oci://registry-1.docker.io/bitnamicharts
```

```console

@ -54,7 +54,16 @@ data:
{{- if .Values.useHostnames }}
echo "${full_hostname}"
{{- else }}
getent hosts "${full_hostname}" | awk '{ print $1 ; exit }'
retry_count=0
until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do
if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then
sleep {{ .Values.nameResolutionTimeout }}
else
error "IP address for ${full_hostname} not found"
exit 1
fi
((retry_count++))
done
{{- end }}
}

@ -278,7 +287,16 @@ data:
{{- if .Values.useHostnames }}
echo "${full_hostname}"
{{- else }}
getent hosts "${full_hostname}" | awk '{ print $1 ; exit }'
retry_count=0
until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do
if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then
sleep {{ .Values.nameResolutionTimeout }}
else
error "IP address for ${full_hostname} not found"
exit 1
fi
((retry_count++))
done
{{- end }}
}

@ -448,7 +466,16 @@ data:
{{- if .Values.useHostnames }}
echo "${full_hostname}"
{{- else }}
getent hosts "${full_hostname}" | awk '{ print $1 ; exit }'
retry_count=0
until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do
if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then
sleep {{ .Values.nameResolutionTimeout }}
else
error "IP address for ${full_hostname} not found"
exit 1
fi
((retry_count++))
done
{{- end }}
}

@ -521,7 +548,16 @@ data:
{{- if .Values.useHostnames }}
echo "${full_hostname}"
{{- else }}
getent hosts "${full_hostname}" | awk '{ print $1 ; exit }'
retry_count=0
until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do
if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then
sleep {{ .Values.nameResolutionTimeout }}
else
error "IP address for ${full_hostname} not found"
exit 1
fi
((retry_count++))
done
{{- end }}
}

@ -650,7 +686,16 @@ data:
{{- if .Values.useHostnames }}
echo "${full_hostname}"
{{- else }}
getent hosts "${full_hostname}" | awk '{ print $1 ; exit }'
retry_count=0
until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do
if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then
sleep {{ .Values.nameResolutionTimeout }}
else
error "IP address for ${full_hostname} not found"
exit 1
fi
((retry_count++))
done
{{- end }}
}

@ -47,9 +47,15 @@ clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @param useHostnames Use hostnames internally when announcing replication
##
## @param useHostnames Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address
##
useHostnames: true
## @param nameResolutionThreshold Failure threshold for internal hostnames resolution
##
nameResolutionThreshold: 5
## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution
##
nameResolutionTimeout: 5

## Enable diagnostic mode in the deployment
##

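Aside (not part of the upstream diff): a minimal values override, assuming the new bitnami/redis defaults above, that switches replication announcements to resolved IP addresses and adjusts the retry budget used by the `getent hosts` loop added to the scripts:

```yaml
# Hypothetical override for the bitnami/redis subchart shown above.
# useHostnames: false makes the init scripts resolve each replica's FQDN
# through the retried `getent hosts` lookup instead of announcing hostnames.
useHostnames: false
# Give up after 10 failed lookups, sleeping 3 seconds between attempts.
nameResolutionThreshold: 10
nameResolutionTimeout: 3
```
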
@ -82,7 +88,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/redis
tag: 7.0.11-debian-11-r0
tag: 7.0.11-debian-11-r7
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -995,7 +1001,7 @@ sentinel:
image:
registry: docker.io
repository: bitnami/redis-sentinel
tag: 7.0.10-debian-11-r8
tag: 7.0.11-debian-11-r6
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1437,7 +1443,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/redis-exporter
tag: 1.50.0-debian-11-r2
tag: 1.50.0-debian-11-r9
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -1688,7 +1694,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r107
tag: 11-debian-11-r114
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -1736,7 +1742,7 @@ sysctl:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r107
tag: 11-debian-11-r114
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

@ -118,7 +118,7 @@ dags:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r113
tag: 11-debian-11-r116
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -185,7 +185,7 @@ web:
image:
registry: docker.io
repository: bitnami/airflow
tag: 2.6.0-debian-11-r2
tag: 2.6.0-debian-11-r5
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -443,7 +443,7 @@ scheduler:
image:
registry: docker.io
repository: bitnami/airflow-scheduler
tag: 2.6.0-debian-11-r1
tag: 2.6.0-debian-11-r3
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -647,7 +647,7 @@ worker:
image:
registry: docker.io
repository: bitnami/airflow-worker
tag: 2.6.0-debian-11-r1
tag: 2.6.0-debian-11-r3
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -920,7 +920,7 @@ git:
image:
registry: docker.io
repository: bitnami/git
tag: 2.40.1-debian-11-r3
tag: 2.40.1-debian-11-r6
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1283,7 +1283,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/airflow-exporter
tag: 0.20220314.0-debian-11-r117
tag: 0.20220314.0-debian-11-r120
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

@ -6,7 +6,7 @@ annotations:
category: Database
licenses: Apache-2.0
apiVersion: v2
appVersion: 15.2.0
appVersion: 15.3.0
dependencies:
- name: common
repository: file://./charts/common
@ -16,7 +16,7 @@ dependencies:
description: PostgreSQL (Postgres) is an open source object-relational database known
for reliability and data integrity. ACID-compliant, it supports foreign keys, joins,
views, triggers and stored procedures.
home: https://github.com/bitnami/charts/tree/main/bitnami/postgresql
home: https://bitnami.com
icon: https://wiki.postgresql.org/images/a/a4/PostgreSQL_logo.3colors.svg
keywords:
- postgresql
@ -26,10 +26,9 @@ keywords:
- replication
- cluster
maintainers:
- name: Bitnami
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: postgresql
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/postgresql
- https://www.postgresql.org/
version: 12.4.3
- https://github.com/bitnami/charts/tree/main/bitnami/postgresql
version: 12.5.2

@ -98,7 +98,7 @@ kubectl delete pvc -l release=my-release
| ---------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `image.registry` | PostgreSQL image registry | `docker.io` |
| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.2.0-debian-11-r30` |
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.3.0-debian-11-r0` |
| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify image pull secrets | `[]` |
@ -360,13 +360,13 @@ kubectl delete pvc -l release=my-release
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). | `{}` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). | `{}` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `{}` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `[]` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). | `{}` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). | `{}` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `{}` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `[]` |
| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` |
| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` |
| `networkPolicy.egressRules.customRules` | Custom network policy rule | `[]` |

### Volume Permissions parameters

@ -375,7 +375,7 @@ kubectl delete pvc -l release=my-release
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r114` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r115` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
@ -403,7 +403,7 @@ kubectl delete pvc -l release=my-release
| `metrics.enabled` | Start a prometheus exporter | `false` |
| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` |
| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` |
| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.12.0-debian-11-r84` |
| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.12.0-debian-11-r86` |
| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` |

@ -95,7 +95,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/postgresql
tag: 15.2.0-debian-11-r30
tag: 15.3.0-debian-11-r0
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -1051,7 +1051,7 @@ networkPolicy:
## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin.
## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s).
## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s).
## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node.
## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules Custom network policy for the PostgreSQL primary node.
##
primaryAccessOnlyFrom:
enabled: false
@ -1073,11 +1073,11 @@ networkPolicy:
## matchLabels:
## label: example
##
customRules: {}
customRules: []
## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin.
## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s).
## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s).
## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes.
## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules Custom network policy for the PostgreSQL read-only nodes.
##
readReplicasAccessOnlyFrom:
enabled: false
@ -1099,9 +1099,9 @@ networkPolicy:
## matchLabels:
## label: example
##
customRules: {}
customRules: []
## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53).
## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
## @param networkPolicy.egressRules.customRules Custom network policy rule
##
egressRules:
# Deny connections to external. This is not compatible with an external database.
@ -1114,7 +1114,7 @@ networkPolicy:
## matchLabels:
## label: example
##
customRules: {}
customRules: []

## @section Volume Permissions parameters
##

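Aside (not part of the diff): with `customRules` retyped from a map to a list, each rule is now an item. A hypothetical override for the primary could look like the sketch below; the exact shape of each item is dictated by the chart's NetworkPolicy template, which is not shown in this diff:

```yaml
networkPolicy:
  enabled: true
  ingressRules:
    primaryAccessOnlyFrom:
      enabled: true
      # customRules is now a list, so rules are items rather than map keys.
      customRules:
        - from:
            - ipBlock:
                cidr: 10.0.0.0/16
```
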
@ -1136,7 +1136,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r114
tag: 11-debian-11-r115
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -1231,7 +1231,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.12.0-debian-11-r84
tag: 0.12.0-debian-11-r86
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

@ -1,6 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.2.5
digest: sha256:318f438acfeaced11d9060877d615caf1985417d2865810defaa886d3496f8d3
generated: "2023-05-03T01:40:45.008497116Z"
version: 2.3.0
digest: sha256:11bbe86be64062d37b725f4dbc909aba3585b4976624ee9d27522366d3f956ea
generated: "2023-05-15T18:10:36.415128525Z"

@ -16,16 +16,15 @@ dependencies:
version: 2.x.x
description: Apache Spark is a high-performance engine for large-scale computing tasks,
such as data processing, machine learning and real-time data streaming. It includes
APIs for Java, Python, Scala and R.
home: https://github.com/bitnami/charts/tree/main/bitnami/spark
home: https://bitnami.com
icon: https://www.apache.org/logos/res/spark/default.png
keywords:
- apache
- spark
maintainers:
- name: Bitnami
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: spark
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/spark
- https://spark.apache.org/
version: 6.5.3
- https://github.com/bitnami/charts/tree/main/bitnami/spark
version: 6.6.1

@ -84,7 +84,7 @@ The command removes all the Kubernetes components associated with the chart and
| ------------------- | ----------------------------------------------------------------------------------------------------- | --------------------- |
| `image.registry` | Spark image registry | `docker.io` |
| `image.repository` | Spark image repository | `bitnami/spark` |
| `image.tag` | Spark image tag (immutable tags are recommended) | `3.3.2-debian-11-r24` |
| `image.tag` | Spark image tag (immutable tags are recommended) | `3.3.2-debian-11-r27` |
| `image.digest` | Spark image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | Spark image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |

@ -2,10 +2,10 @@ annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.2.5
appVersion: 2.3.0
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://github.com/bitnami/charts/tree/main/bitnami/common
home: https://bitnami.com
icon: https://bitnami.com/downloads/logos/bitnami-mark.png
keywords:
- common
@ -14,11 +14,10 @@ keywords:
- function
- bitnami
maintainers:
- name: Bitnami
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: common
sources:
- https://github.com/bitnami/charts
- https://www.bitnami.com/
type: library
version: 2.2.5
version: 2.3.0

@ -2,6 +2,8 @@

A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.

Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.

## TL;DR

```yaml

@ -45,7 +45,7 @@ Return the proper Docker Image Registry Secret Names (deprecated: use common.ima

{{- if (not (empty $pullSecrets)) }}
imagePullSecrets:
{{- range $pullSecrets }}
{{- range $pullSecrets | uniq }}
- name: {{ . }}
{{- end }}
{{- end }}
@ -73,7 +73,7 @@ Return the proper Docker Image Registry Secret Names evaluating values as templa

{{- if (not (empty $pullSecrets)) }}
imagePullSecrets:
{{- range $pullSecrets }}
{{- range $pullSecrets | uniq }}
- name: {{ . }}
{{- end }}
{{- end }}

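For illustration (not part of the diff): with the `uniq` filter above, a hypothetical setup that names the same secret at both the global and the image level now renders a single `imagePullSecrets` entry instead of a duplicate:

```yaml
# Hypothetical values; both lists reference the same secret name.
global:
  imagePullSecrets:
    - my-registry-secret
image:
  pullSecrets:
    - my-registry-secret
# Expected rendering after the change:
# imagePullSecrets:
#   - name: my-registry-secret
```
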
@ -92,7 +92,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/spark
tag: 3.3.2-debian-11-r24
tag: 3.3.2-debian-11-r27
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

@ -15,15 +15,14 @@ dependencies:
version: 2.x.x
description: Apache ZooKeeper provides a reliable, centralized register of configuration
data and services for distributed applications.
home: https://github.com/bitnami/charts/tree/main/bitnami/zookeeper
home: https://bitnami.com
icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/zookeeper.svg
keywords:
- zookeeper
maintainers:
- name: Bitnami
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: zookeeper
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/zookeeper
- https://zookeeper.apache.org/
version: 11.3.2
- https://github.com/bitnami/charts/tree/main/bitnami/zookeeper
version: 11.4.1

@ -8,6 +8,8 @@ Apache ZooKeeper provides a reliable, centralized register of configuration data

Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.

Looking to use Apache ZooKeeper in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.

## TL;DR

```console

@ -95,7 +95,7 @@ data:
ORD=${BASH_REMATCH[2]}
export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))"
else
echo "Failed to get index from hostname $HOST"
echo "Failed to get index from hostname $HOSTNAME"
exit 1
fi
fi

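For context (the change itself only corrects the error message to reference `$HOSTNAME`): the script derives each server ID from the pod ordinal plus `minServerId`, for example:

```yaml
# Illustrative mapping, assuming the chart's default minServerId.
minServerId: 1
# zookeeper-0 -> ZOO_SERVER_ID=1
# zookeeper-1 -> ZOO_SERVER_ID=2
# zookeeper-2 -> ZOO_SERVER_ID=3
```
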
@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.8-0'
catalog.cattle.io/release-name: cockroachdb
apiVersion: v1
appVersion: 22.2.9
appVersion: 23.1.1
description: CockroachDB is a scalable, survivable, strongly-consistent SQL database.
home: https://www.cockroachlabs.com
icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png
@ -14,4 +14,4 @@ maintainers:
name: cockroachdb
sources:
- https://github.com/cockroachdb/cockroach
version: 10.0.9
version: 11.0.0

@ -229,10 +229,10 @@ kubectl get pods \
```

```
my-release-cockroachdb-0 cockroachdb/cockroach:v22.2.9
my-release-cockroachdb-1 cockroachdb/cockroach:v22.2.9
my-release-cockroachdb-2 cockroachdb/cockroach:v22.2.9
my-release-cockroachdb-3 cockroachdb/cockroach:v22.2.9
my-release-cockroachdb-0 cockroachdb/cockroach:v23.1.1
my-release-cockroachdb-1 cockroachdb/cockroach:v23.1.1
my-release-cockroachdb-2 cockroachdb/cockroach:v23.1.1
my-release-cockroachdb-3 cockroachdb/cockroach:v23.1.1
```

Resume normal operations. Once you are comfortable that the stability and performance of the cluster is what you'd expect post-upgrade, finalize the upgrade:
@ -287,7 +287,7 @@ Verify that no pod is deleted and then upgrade as normal. A new StatefulSet will

For more information about upgrading a cluster to the latest major release of CockroachDB, see [Upgrade to CockroachDB v21.1](https://www.cockroachlabs.com/docs/stable/upgrade-cockroach-version.html).

Note that there are some backward-incompatible changes to SQL features between versions 20.2 and 21.1. For details, see the [CockroachDB v22.2.9 release notes](https://www.cockroachlabs.com/docs/releases/v22.2.9.html#backward-incompatible-changes).
Note that there are some backward-incompatible changes to SQL features between versions 20.2 and 21.1. For details, see the [CockroachDB v23.1.1 release notes](https://www.cockroachlabs.com/docs/releases/v23.1.1.html#backward-incompatible-changes).

## Configuration

@ -316,7 +316,7 @@ For details see the [`values.yaml`](values.yaml) file.
| `conf.store.size` | CockroachDB storage size | `""` |
| `conf.store.attrs` | CockroachDB storage attributes | `""` |
| `image.repository` | Container image name | `cockroachdb/cockroach` |
| `image.tag` | Container image tag | `v22.2.9` |
| `image.tag` | Container image tag | `v23.1.1` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` |
| `statefulset.replicas` | StatefulSet replicas number | `3` |

@ -274,13 +274,18 @@ Validate that if user enabled tls, then either self-signed certificates or certi
{{- end -}}

{{- define "cockroachdb.securityContext.versionValidation" }}
{{/* Allow using `securityContext` for custom images. */}}
{{- if ne "cockroachdb/cockroach" .Values.image.repository -}}
{{ print true }}
{{- else -}}
{{- if semverCompare ">=22.1.2" .Values.image.tag -}}
{{ print true }}
{{- else }}
{{- else -}}
{{- if semverCompare ">=21.2.13, <22.1.0" .Values.image.tag -}}
{{ print true }}
{{- else }}
{{- else -}}
{{ print false }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

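Aside (not part of the diff): the helper above returns `true` for any repository other than `cockroachdb/cockroach`, or for official tags inside the supported semver ranges; only then do the certificate jobs below render their pod `securityContext`. A hypothetical override for a mirrored image would therefore still pass the check:

```yaml
# Illustrative values only; registry.example.com is a placeholder.
image:
  # Any repository other than "cockroachdb/cockroach" short-circuits the
  # semver checks in cockroachdb.securityContext.versionValidation.
  repository: registry.example.com/mirror/cockroach
  tag: v23.1.1
tls:
  certs:
    selfSigner:
      securityContext:
        enabled: true
```
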
@ -25,14 +25,12 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
spec:
{{- if eq (include "cockroachdb.securityContext.versionValidation" .) "true" }}
{{- if and .Values.tls.certs.selfSigner.securityContext.enabled }}
securityContext:
runAsGroup: 1000
runAsUser: 1000
fsGroup: 1000
runAsNonRoot: true
{{- end }}
{{- end }}
restartPolicy: Never
containers:

@ -25,14 +25,12 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
spec:
{{- if eq (include "cockroachdb.securityContext.versionValidation" .) "true" }}
{{- if and .Values.tls.certs.selfSigner.securityContext.enabled }}
securityContext:
runAsGroup: 1000
runAsUser: 1000
fsGroup: 1000
runAsNonRoot: true
{{- end }}
{{- end }}
restartPolicy: Never
containers:

@ -1,7 +1,7 @@
# Generated file, DO NOT EDIT. Source: build/templates/values.yaml
image:
repository: cockroachdb/cockroach
tag: v22.2.9
tag: v23.1.1
pullPolicy: IfNotPresent
credentials: {}
# registry: docker.io

@ -1,5 +1,9 @@
# Datadog changelog

## 3.29.1

* Add `customresourcedefinitions` option to enable CRD metrics collection in KSM Core.

## 3.29.0

* Add `datadog.securityAgent.compliance.xccdf.enabled` parameter to enable XCCDF feature in CSPM.

@ -19,4 +19,4 @@ name: datadog
sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-agent
version: 3.29.0
version: 3.29.1

@ -1,6 +1,6 @@
# Datadog

![Version: 3.29.0](https://img.shields.io/badge/Version-3.29.0-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
![Version: 3.29.1](https://img.shields.io/badge/Version-3.29.1-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)

[Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/).

@ -636,6 +636,7 @@ helm install <RELEASE_NAME> \
| datadog.hostVolumeMountPropagation | string | `"None"` | Allow to specify the `mountPropagation` value on all volumeMounts using HostPath |
| datadog.ignoreAutoConfig | list | `[]` | List of integration to ignore auto_conf.yaml. |
| datadog.kubeStateMetricsCore.annotationsAsTags | object | `{}` | Extra annotations to collect from resources and to turn into datadog tag. |
| datadog.kubeStateMetricsCore.collectCrdMetrics | bool | `false` | Enable watching CRD objects and collecting their corresponding metrics kubernetes_state.crd.* |
| datadog.kubeStateMetricsCore.collectSecretMetrics | bool | `true` | Enable watching secret objects and collecting their corresponding metrics kubernetes_state.secret.* |
| datadog.kubeStateMetricsCore.collectVpaMetrics | bool | `false` | Enable watching VPA objects and collecting their corresponding metrics kubernetes_state.vpa.* |
| datadog.kubeStateMetricsCore.enabled | bool | `true` | Enable the kubernetes_state_core check in the Cluster Agent (Requires Cluster Agent 1.12.0+) |

@ -11,6 +11,9 @@ kubernetes_state_core.yaml.default: |-
{{- end }}
{{- if .Values.datadog.kubeStateMetricsCore.collectVpaMetrics }}
- verticalpodautoscalers
{{- end }}
{{- if .Values.datadog.kubeStateMetricsCore.collectCrdMetrics }}
- customresourcedefinitions
{{- end }}
- nodes
- pods

@ -156,6 +156,11 @@ datadog:
## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to Datadog Cluster Agent to run the kubernetes_state_core check.
collectVpaMetrics: false

# datadog.kubeStateMetricsCore.collectCrdMetrics -- Enable watching CRD objects and collecting their corresponding metrics kubernetes_state.crd.*

## Configuring this field will change the default kubernetes_state_core check configuration to run the kubernetes_state_core check.
collectCrdMetrics: false

# datadog.kubeStateMetricsCore.useClusterCheckRunners -- For large clusters where the Kubernetes State Metrics Check Core needs to be distributed on dedicated workers.

## Configuring this field will create a separate deployment which will run Cluster Checks, including Kubernetes State Metrics Core.

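Aside (not part of the diff): enabling the new option is a one-line values override; the template hunk above then appends `customresourcedefinitions` to the kubernetes_state_core collectors:

```yaml
datadog:
  kubeStateMetricsCore:
    enabled: true
    # New in 3.29.1: emit kubernetes_state.crd.* metrics for CRD objects.
    collectCrdMetrics: true
```
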
@ -9,7 +9,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.21-0'
catalog.cattle.io/release-name: instana-agent
apiVersion: v2
appVersion: 1.248.0
appVersion: 1.249.0
description: Instana Agent for Kubernetes
home: https://www.instana.com/
icon: https://agents.instana.io/helm/stan-logo-2020.png
@ -23,4 +23,4 @@ maintainers:
name: instana-agent
sources:
- https://github.com/instana/instana-agent-docker
version: 1.2.58
version: 1.2.59

@ -333,6 +333,9 @@ zones:

## Changelog

### 1.2.59
* Introduce unique selectorLabels and commonLabels for k8s-sensor deployment

### 1.2.58
* Default to `internalTrafficPolicy` instead of `topologyKeys` for rendering of static YAMLs

Some files were not shown because too many files have changed in this diff.