Charts CI
```
Updated:
  argo/argo-cd: - 5.16.2
  asserts/asserts: - 1.17.0
  avesha/kubeslice-controller: - 0.4.3
  avesha/kubeslice-worker: - 0.4.6
  bitnami/kafka: - 20.0.0
  bitnami/redis: - 17.3.14
  bitnami/wordpress: - 15.2.18
  bitnami/zookeeper: - 11.0.0
  citrix/citrix-cpx-with-ingress-controller: - 1.28.2
  citrix/citrix-ingress-controller: - 1.28.2
  cockroach-labs/cockroachdb: - 10.0.0
  codefresh/cf-runtime: - 1.8.0
  datadog/datadog: - 3.5.1
  gitlab/gitlab: - 6.6.2
  jaeger/jaeger-operator: - 2.38.0
  jenkins/jenkins: - 4.2.17
  kuma/kuma: - 2.0.1
  minio/minio-operator: - 4.5.5
  redpanda/redpanda: - 2.3.14
  speedscale/speedscale-operator: - 1.2.6
  sysdig/sysdig: - 1.15.60
  traefik/traefik: - 20.7.0
```
parent 863de305de
commit c03ad55354
Binary files not shown.
|
argo-cd Chart.yaml:
```
@@ -1,14 +1,12 @@
annotations:
  artifacthub.io/changes: |
    - "[Fixed]: Set aggregate roles only for using resources"
    - "[Added]: Add argocdextensions to aggregate roles"
    - "[Fixed]: Fix typo of notification.bots.slack.image in values"
    - "[Changed]: Update ArgoCD to v2.5.4"
  catalog.cattle.io/certified: partner
  catalog.cattle.io/display-name: Argo CD
  catalog.cattle.io/kube-version: '>=1.22.0-0'
  catalog.cattle.io/release-name: argo-cd
apiVersion: v2
-appVersion: v2.5.3
+appVersion: v2.5.4
dependencies:
- condition: redis-ha.enabled
  name: redis-ha
@@ -30,4 +28,4 @@ name: argo-cd
sources:
- https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
- https://github.com/argoproj/argo-cd
-version: 5.16.1
+version: 5.16.2
```
|
|
|
asserts Chart.yaml:
```
@@ -53,4 +53,4 @@ maintainers:
  url: https://github.com/asserts
name: asserts
type: application
-version: 1.16.0
+version: 1.17.0
```
|
|
|
asserts chart — new authorization template helpers:
```
@@ -0,0 +1,37 @@
{{/*
authorization name
*/}}
{{- define "asserts.authorizationName" -}}
{{- if .Values.authorization.nameOverride -}}
{{- .Values.authorization.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{ include "asserts.name" . }}-authorization
{{- end -}}
{{- end -}}

{{/*
authorization fullname
*/}}
{{- define "asserts.authorizationFullname" -}}
{{- if .Values.authorization.fullnameOverride -}}
{{- .Values.authorization.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{ include "asserts.fullname" . }}-authorization
{{- end -}}
{{- end -}}

{{/*
authorization common labels
*/}}
{{- define "asserts.authorizationLabels" -}}
{{ include "asserts.labels" . }}
app.kubernetes.io/component: authorization
{{- end }}

{{/*
authorization selector labels
*/}}
{{- define "asserts.authorizationSelectorLabels" -}}
{{ include "asserts.selectorLabels" . }}
app.kubernetes.io/component: authorization
{{- end }}
```
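These helpers follow the usual Helm pattern: a component-scoped name/fullname with override hooks, plus label sets that layer `app.kubernetes.io/component: authorization` on top of the chart-wide labels. A minimal sketch of what they resolve to, assuming a release named `asserts` with no name or fullname overrides (illustrative only — the exact output depends on the chart-wide `asserts.fullname`/`asserts.labels` helpers):

```
# Illustrative rendering only
metadata:
  name: asserts-authorization                   # include "asserts.authorizationFullname" .
  labels:
    app.kubernetes.io/name: asserts
    app.kubernetes.io/instance: asserts
    app.kubernetes.io/component: authorization  # appended by "asserts.authorizationLabels"
```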
@ -0,0 +1,127 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "asserts.authorizationFullname" . }}
|
||||
labels: {{- include "asserts.authorizationLabels" . | nindent 4 }}
|
||||
{{- with .Values.extraLabels }}
|
||||
{{- toYaml . | nindent 4 -}}
|
||||
{{- end }}
|
||||
{{- if .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.annotations | nindent 4 -}}
|
||||
{{- end }}
|
||||
data:
|
||||
application.yml: |-
|
||||
server:
|
||||
port: {{.Values.authorization.service.port}}
|
||||
max-http-header-size: 32KB
|
||||
servlet:
|
||||
context-path: "/authorization"
|
||||
|
||||
management:
|
||||
endpoint:
|
||||
health:
|
||||
show-details: when-authorized
|
||||
info:
|
||||
enabled: true
|
||||
prometheus:
|
||||
enabled: true
|
||||
endpoints:
|
||||
web:
|
||||
exposure:
|
||||
include: info, health, prometheus
|
||||
|
||||
logging:
|
||||
level:
|
||||
root: INFO
|
||||
pattern:
|
||||
console: '%-5p %d{ISO8601,UTC} %t %c{0} %mdc%n %m%n%rEx'
|
||||
|
||||
tenant:
|
||||
cache_seconds: 5
|
||||
mode: config
|
||||
cluster:
|
||||
tenant:
|
||||
selector: config
|
||||
|
||||
asserts:
|
||||
identityprovider:
|
||||
tenant: {{ include "asserts.tenant" . }}
|
||||
standalone:
|
||||
tenant:
|
||||
enabled: 'true'
|
||||
name: Asserts Bootstrap
|
||||
grafana:
|
||||
admin_password: "${GRAFANA_ADMIN_PASSWORD}"
|
||||
orgId: '1'
|
||||
dataSource: Prometheus
|
||||
password: "${GRAFANA_ADMIN_PASSWORD}"
|
||||
username: admin
|
||||
url: http://{{.Release.Name}}-grafana.{{include "domain" .}}:{{.Values.grafana.service.port}}
|
||||
admin_username: admin
|
||||
tsdb:
|
||||
tsdbUrl: http://{{.Release.Name}}-promxyruler.{{include "domain" .}}:8082
|
||||
password: ''
|
||||
username: ''
|
||||
accountId: ''
|
||||
graph:
|
||||
server: {{.Values.redisgraph.fullnameOverride}}-master.{{include "domain" .}}
|
||||
port: '6379'
|
||||
sentinel_mode: {{ ternary "'1'" "'0'" (.Values.redisgraph.sentinel.enabled)}}
|
||||
master_name: mymaster
|
||||
sentinels: {{ include "asserts.graphSentinelHosts" . }}
|
||||
search:
|
||||
server: {{.Values.redisearch.fullnameOverride}}-master.{{include "domain" .}}
|
||||
port: '6379'
|
||||
sentinel_mode: {{ ternary "'1'" "'0'" (.Values.redisearch.sentinel.enabled)}}
|
||||
master_name: mymaster
|
||||
sentinels: {{ include "asserts.searchSentinelHosts" . }}
|
||||
tsdb:
|
||||
url: http://{{.Release.Name}}-tsdb-server.{{include "domain" .}}:8428
|
||||
oauth2:
|
||||
cookie:
|
||||
domain: ""
|
||||
secure: {{ .Values.global.secureCookie }}
|
||||
security:
|
||||
hmac:
|
||||
currentSymmetricKey: muS5uSK2ZjexXhbaqNm18ktqaCAICI51
|
||||
assertsReadOnlyRoleName: ASSERTS
|
||||
oauth2:
|
||||
internal:
|
||||
enabled: false
|
||||
store:
|
||||
owner:
|
||||
datasource:
|
||||
hikari:
|
||||
maximum-pool-size: 1
|
||||
url: jdbc:postgresql://{{.Values.postgres.fullnameOverride}}.{{include "domain" .}}:5432/asserts
|
||||
username: postgres
|
||||
password: ${POSTGRES_RDB_PASSWORD}
|
||||
user:
|
||||
datasource:
|
||||
url: ${store.owner.datasource.url}
|
||||
username: tenant_user
|
||||
password: ${POSTGRES_RDB_PASSWORD}
|
||||
|
||||
spring:
|
||||
jackson:
|
||||
# exclude nulls from api responses
|
||||
default-property-inclusion: NON_NULL
|
||||
jpa:
|
||||
database: POSTGRESQL
|
||||
open-in-view: false
|
||||
properties:
|
||||
hibernate:
|
||||
dialect: org.hibernate.dialect.PostgreSQL92Dialect
|
||||
physical_naming_strategy: org.springframework.boot.orm.jpa.hibernate.SpringPhysicalNamingStrategy
|
||||
implicit_naming_strategy: org.springframework.boot.orm.jpa.hibernate.SpringImplicitNamingStrategy
|
||||
liquibase:
|
||||
enabled: true
|
||||
changeLog: classpath:/db/changelog/db.changelog-master.yaml
|
||||
parameters:
|
||||
database: asserts
|
||||
schema: public
|
||||
username: ${store.user.datasource.username}
|
||||
password: ${store.user.datasource.password}
|
||||
bootstrap_enabled: "true"
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "asserts.authorizationFullname" . }}
|
||||
labels: {{- include "asserts.authorizationSelectorLabels" . | nindent 4 }}
|
||||
{{- with .Values.authorization.extraLabels }}
|
||||
{{- toYaml . | nindent 4 -}}
|
||||
{{- end }}
|
||||
{{- if .Values.authorization.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 -}}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.authorization.replicaCount }}
|
||||
selector:
|
||||
matchLabels: {{- include "asserts.authorizationSelectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels: {{- include "asserts.authorizationLabels" . | nindent 8 }}
|
||||
{{- with .Values.authorization.extraPodLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/configmap: {{ include (print $.Template.BasePath "/authorization/configmap.yaml") . | sha256sum }}
|
||||
{{- with .Values.authorization.extraPodAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.authorization.imagePullSecrets }}
|
||||
imagePullSecrets: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "asserts.serviceAccountName" . }}
|
||||
{{- if .Values.authorization.initContainers }}
|
||||
initContainers: {{ include "common.tplvalues.render" ( dict "value" .Values.authorization.initContainers "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ include "asserts.authorizationFullname" . }}
|
||||
image: "{{ .Values.authorization.image.repository }}:{{ .Values.authorization.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.authorization.image.pullPolicy }}
|
||||
env:
|
||||
# set POSTGRES_RDB_PASSWORD generated from
|
||||
# postgres subchart for consumption by the Asserts authorization server
|
||||
- name: POSTGRES_RDB_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.postgres.fullnameOverride }}
|
||||
key: postgres-password
|
||||
optional: false
|
||||
{{- with .Values.authorization.extraEnv }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.authorization.extraEnvFrom }}
|
||||
envFrom: {{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.authorization.service.port }}
|
||||
resources: {{- toYaml .Values.authorization.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /opt/asserts/authorization/conf/application.yml
|
||||
subPath: application.yml
|
||||
readOnly: true
|
||||
{{- with .Values.authorization.extraVolumeMounts }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.authorization.nodeSelector }}
|
||||
nodeSelector: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.authorization.affinity }}
|
||||
affinity: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.authorization.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ include "asserts.authorizationFullname" . }}
|
||||
items:
|
||||
- key: application.yml
|
||||
path: application.yml
|
||||
{{- with .Values.authorization.extraVolumes }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
|
asserts chart — new authorization Service template:
```
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "asserts.authorizationFullname" . }}
  labels: {{- include "asserts.authorizationLabels" . | nindent 4 }}
    {{- with .Values.authorization.extraLabels }}
    {{- toYaml . | nindent 4 -}}
    {{- end }}
  {{- if .Values.authorization.annotations }}
  annotations:
    {{- toYaml .Values.authorization.annotations | nindent 4 -}}
  {{- end }}
spec:
  type: {{ .Values.authorization.service.type }}
  ports:
    - port: {{ .Values.authorization.service.port }}
      targetPort: {{ .Values.authorization.service.port }}
      protocol: TCP
      name: http
  selector: {{- include "asserts.authorizationSelectorLabels" . | nindent 4 }}
```
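The Service publishes whatever is set under `authorization.service.port` and selects pods through the same `asserts.authorizationSelectorLabels` helper the Deployment uses, so the two stay in sync from a single value. A hedged values sketch for overriding the service settings (8070 is the chart default added further down in values.yaml; the annotation key is purely illustrative):

```
authorization:
  service:
    type: ClusterIP
    port: 8070                           # chart default; templates read this one value
  annotations:
    example.com/owner: platform-team     # hypothetical annotation
```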
@ -227,6 +227,109 @@ data:
|
|||
action: replace
|
||||
{{- end }}
|
||||
|
||||
- job_name: {{ .Release.Name }}-authorization
|
||||
kubernetes_sd_configs:
|
||||
- namespaces:
|
||||
names:
|
||||
- {{ .Release.Namespace }}
|
||||
role: endpoints
|
||||
honor_timestamps: true
|
||||
metrics_path: /authorization/actuator/prometheus
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name]
|
||||
separator: ;
|
||||
regex: asserts
|
||||
replacement: $1
|
||||
action: keep
|
||||
- source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_instance]
|
||||
separator: ;
|
||||
regex: {{ .Release.Name }}
|
||||
replacement: $1
|
||||
action: keep
|
||||
- source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_component]
|
||||
separator: ;
|
||||
regex: authorization
|
||||
replacement: $1
|
||||
action: keep
|
||||
- source_labels: [__meta_kubernetes_endpoint_port_name]
|
||||
separator: ;
|
||||
regex: http
|
||||
replacement: $1
|
||||
action: keep
|
||||
- source_labels: [__meta_kubernetes_endpoint_address_target_kind, __meta_kubernetes_endpoint_address_target_name]
|
||||
separator: ;
|
||||
regex: Node;(.*)
|
||||
target_label: node
|
||||
replacement: ${1}
|
||||
action: replace
|
||||
- source_labels: [__meta_kubernetes_endpoint_address_target_kind, __meta_kubernetes_endpoint_address_target_name]
|
||||
separator: ;
|
||||
regex: Pod;(.*)
|
||||
target_label: pod
|
||||
replacement: ${1}
|
||||
action: replace
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: namespace
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__meta_kubernetes_service_name]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: service
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: pod
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__meta_kubernetes_pod_container_name]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: container
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__meta_kubernetes_service_name]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: job
|
||||
replacement: ${1}
|
||||
action: replace
|
||||
- separator: ;
|
||||
regex: (.*)
|
||||
target_label: endpoint
|
||||
replacement: http
|
||||
action: replace
|
||||
- separator: ;
|
||||
regex: (.*)
|
||||
target_label: endpoint
|
||||
replacement: http
|
||||
action: replace
|
||||
# add tenant, asserts_env, & asserts_site
|
||||
# to all remaining values metrics if applicable
|
||||
- separator: ;
|
||||
regex: (.*)
|
||||
target_label: tenant
|
||||
replacement: {{ include "asserts.tenant" . }}
|
||||
action: replace
|
||||
{{- if .Values.assertsClusterEnv }}
|
||||
- separator: ;
|
||||
regex: (.*)
|
||||
target_label: asserts_env
|
||||
replacement: {{ .Values.assertsClusterEnv }}
|
||||
action: replace
|
||||
{{- end }}
|
||||
{{- if .Values.assertsClusterSite }}
|
||||
- separator: ;
|
||||
regex: (.*)
|
||||
target_label: asserts_site
|
||||
replacement: {{ .Values.assertsClusterSite }}
|
||||
action: replace
|
||||
{{- end }}
|
||||
|
||||
- job_name: {{ .Release.Name }}-tsdb-server
|
||||
kubernetes_sd_configs:
|
||||
- namespaces:
|
||||
|
|
|
asserts server ConfigMap — oauth2 cookie settings now driven by values:
```
@@ -166,11 +166,12 @@ data:
      url: http://{{.Release.Name}}-tsdb-server.{{include "domain" .}}:8428
    oauth2:
      cookie:
-       domain: localhost
-       secure: false
+       domain: ""
+       secure: {{ .Values.global.secureCookie }}
    security:
      hmac:
        currentSymmetricKey: muS5uSK2ZjexXhbaqNm18ktqaCAICI51
      assertsReadOnlyRoleName: ASSERTS
      oauth2:
        internal:
          enabled: false
```
|
|
|
asserts server StatefulSet — replica count taken from values:
```
@@ -11,7 +11,7 @@ metadata:
    {{- toYaml . | nindent 4 -}}
    {{- end }}
spec:
-  replicas: 1
+  replicas: {{ .Values.server.replicaCount }}
  selector:
    matchLabels: {{- include "asserts.serverSelectorLabels" . | nindent 6 }}
  serviceName: {{ include "asserts.serverFullname" . }}
```
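With the hard-coded `replicas: 1` gone, the server StatefulSet scales from values; the default stays at one replica via the new `server.replicaCount` entry added later in values.yaml. Sketch:

```
server:
  replicaCount: 2   # scale the asserts server StatefulSet; chart default is 1
```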
|
asserts server StatefulSet — image tag no longer falls back to the chart appVersion:
```
@@ -37,7 +37,7 @@ spec:
      {{- end }}
      containers:
        - name: server
-         image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion }}"
+         image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}"
          imagePullPolicy: {{ .Values.server.image.pullPolicy | quote }}
          env:
            # set POSTGRES_RDB_PASSWORD generated from
```
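Dropping the `| default .Chart.AppVersion` fallback means the server image tag comes solely from values and has to be pinned there. A values sketch (the tag shown is hypothetical; the repository default is `asserts/asserts-server` per this chart's values.yaml):

```
server:
  image:
    repository: asserts/asserts-server
    tag: v0.2.585          # hypothetical example; must now be set explicitly
    pullPolicy: IfNotPresent
```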
|
|
|
asserts UI nginx ConfigMap — server port templated and a new /authorization proxy location:
```
@@ -16,11 +16,24 @@ data:
      proxy_set_header X-Asserts-Tenant '{{ include "asserts.tenant" . }}';
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection 'upgrade';
-     proxy_set_header Host localhost:8030;
+     proxy_set_header Host localhost:{{ .Values.server.service.port }};
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Original-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
-     proxy_pass http://{{ .Release.Name }}-server.{{ include "domain" . }}:8030;
+     proxy_pass http://{{ .Release.Name }}-server.{{ include "domain" . }}:{{ .Values.server.service.port }};
      proxy_buffering off;
    }
    location /authorization {
      proxy_http_version 1.1;
      proxy_cache_bypass $http_upgrade;
      proxy_set_header X-Asserts-Tenant '{{ include "asserts.tenant" . }}';
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection 'upgrade';
      proxy_set_header Host localhost:{{ .Values.authorization.service.port }};
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Original-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_pass http://{{ .Release.Name }}-authorization.{{ include "domain" . }}:{{ .Values.authorization.service.port }};
      proxy_buffering off;
    }
    error_page 500 502 503 504 /50x.html;
```
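For orientation, a hypothetical rendering of the new `/authorization` block for a release named `asserts` installed in namespace `asserts`, with the default `authorization.service.port` of 8070 and assuming the chart's `domain` helper expands to the namespace-qualified cluster domain (the ConfigMap key name below is illustrative):

```
# Hypothetical rendered excerpt of the UI nginx ConfigMap
data:
  default.conf: |
    location /authorization {
      proxy_set_header Host localhost:8070;
      proxy_pass http://asserts-authorization.asserts.svc.cluster.local:8070;
      proxy_buffering off;
    }
```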
|
|
|
asserts UI Deployment — image tag no longer falls back to the chart appVersion:
```
@@ -32,7 +32,7 @@ spec:
            {{- toYaml . | nindent 12 }}
            {{- end }}

-         image: "{{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag | default .Chart.AppVersion }}"
+         image: "{{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag }}"
          imagePullPolicy: {{ .Values.ui.image.pullPolicy }}

          volumeMounts:
```
|
|
|
asserts values.yaml — new global.secureCookie flag and an authorization scrape target on the ServiceMonitor:
```
@@ -3,9 +3,11 @@
## This will override any available parameters in this chart
## as well as dependent charts
##
-## Current available global parameters: storageClass
+## Current available global parameters: storageClass, secureCookie
global:
  storageClass: ""
  # set to true if oauth is configured on Asserts and exclusively using https
  secureCookie: false

nameOverride: ""
fullnameOverride: ""
@@ -50,6 +52,12 @@ serviceMonitor:
    - sourceLabels: [job]
      regex: "{{.Release.Name}}-server"
      action: keep
  - port: http
    path: /authorization/actuator/prometheus
    relabelings:
    - sourceLabels: [job]
      regex: "{{.Release.Name}}-authorization"
      action: keep
  - port: http
    path: /metrics
    relabelings:
```
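The new `global.secureCookie` flag feeds the oauth2 cookie settings shown earlier in the server ConfigMap; per the inline comment it should be enabled when OAuth is configured and Asserts is served exclusively over HTTPS. Sketch:

```
global:
  storageClass: ""
  secureCookie: true   # mark the oauth2 session cookie Secure in HTTPS-only deployments
```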
|
@ -126,6 +134,8 @@ server:
|
|||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: asserts/asserts-server
|
||||
pullPolicy: IfNotPresent
|
||||
|
@ -205,6 +215,64 @@ server:
|
|||
## When set, will use the existing PVC for persistence
|
||||
existingClaim: ""
|
||||
|
||||
## Asserts authorization server configuration
|
||||
##
|
||||
authorization:
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: asserts/authorization
|
||||
pullPolicy: IfNotPresent
|
||||
## Overrides the image tag whose default is the chart appVersion.
|
||||
tag: v0.2.415
|
||||
|
||||
initContainers:
|
||||
- name: wait-for-postgres
|
||||
image: asserts/wait-for:v2.2.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- "{{.Values.postgres.fullnameOverride}}.{{.Release.Namespace}}.{{.Values.clusterDomain}}:5432"
|
||||
- "-t"
|
||||
- "420"
|
||||
|
||||
imagePullSecrets: []
|
||||
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate: {}
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 8070
|
||||
|
||||
resources: {}
|
||||
|
||||
## environment variables to add to the asserts-server pod
|
||||
extraEnv: []
|
||||
|
||||
## environment variables from secrets or configmaps to add to the asserts-server pod
|
||||
extraEnvFrom: []
|
||||
|
||||
annotations: {}
|
||||
|
||||
extraLabels: {}
|
||||
|
||||
extraPodLabels: {}
|
||||
|
||||
extraPodAnnotations: {}
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
extraVolumeMounts: []
|
||||
|
||||
extraVolumes: []
|
||||
|
||||
## Asserts ui configuration
|
||||
##
|
||||
|
@ -212,6 +280,8 @@ ui:
|
|||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: asserts/asserts-ui
|
||||
pullPolicy: IfNotPresent
|
||||
|
|
|
kubeslice-controller Chart.yaml:
```
@@ -32,4 +32,4 @@ keywords:
kubeVersion: '>= 1.19'
name: kubeslice-controller
type: application
-version: 0.4.2
+version: 0.4.3
```
|
|
|
kubeslice-controller README:
```
@@ -22,7 +22,7 @@ This chart installs the following:
- KubeSlice Manager
- Kubeslice dashboard for user interactions.

-📖 For step-by-step instructions, go to [documentation](https://docs.avesha.io/documentation/enterprise/0.1.0/deployment-partners/deploying-kubeslice-on-rancher/).
+📖 For step-by-step instructions, go to [documentation](https://docs.avesha.io/documentation/enterprise/0.2.0/deployment-partners/deploying-kubeslice-on-rancher/).

This chart will install our enterprise edition of KubeSlice.
```
|
||||
|
|
|
kubeslice values.yaml — image updates:
```
@@ -12,13 +12,13 @@ kubeslice:
    tag: 0.2.0
    pullPolicy: IfNotPresent
  ovpnJob:
-   image: aveshadev/gateway-certs-generator
-   tag: 0.1.5-SNAPSHOT-26aa6173
+   image: aveshasystems/gateway-certs-generator
+   tag: 0.1.5

# Kubeslice UI settings
ui:
  image: aveshasystems/kubeslice-ui-ent
- tag: 0.2.0
+ tag: 0.2.3
  pullPolicy: IfNotPresent
dashboard:
  image: aveshasystems/kubeslice-kubernetes-dashboard
```
|
|
|
kubeslice-worker Chart.yaml:
```
@@ -5,7 +5,7 @@ annotations:
  catalog.cattle.io/namespace: kubeslice-system
  catalog.cattle.io/release-name: kubeslice-worker
apiVersion: v2
-appVersion: 0.2.3
+appVersion: 0.2.1
description: A Helm chart for Kubeslice Worker Operator
icon: https://kubeslice.io/documentation/open-source/img/kubeslice-logo.svg
keywords:
@@ -32,4 +32,4 @@ keywords:
kubeVersion: '>= 1.19'
name: kubeslice-worker
type: application
-version: 0.4.5
+version: 0.4.6
```
|
|
|
kubeslice-worker values.yaml:
```
@@ -1,6 +1,6 @@
operator:
- image: aveshadev/worker-operator
- tag: 0.10.0-SNAPSHOT-e17d9350
+ image: docker.io/aveshasystems/worker-operator-ent
+ tag: 0.2.1
  pullPolicy: IfNotPresent
  logLevel: INFO
```
|
||||
|
|
|
kafka Chart.lock:
```
@@ -1,9 +1,9 @@
dependencies:
- name: zookeeper
  repository: https://charts.bitnami.com/bitnami
- version: 10.2.5
+ version: 11.0.0
- name: common
  repository: https://charts.bitnami.com/bitnami
  version: 2.2.1
-digest: sha256:a7b6919993123d2aa5fac072d980523e4e3eee61ae8a236f321c2d205921911a
-generated: "2022-11-30T07:44:11.034596899Z"
+digest: sha256:80c409365c99c5b2953b4f40f9ed47d4e41ca8407df5569ef82c9e5fdf3546ce
+generated: "2022-12-06T10:41:37.745302+01:00"
```
|
|
|
kafka Chart.yaml:
```
@@ -10,7 +10,7 @@ dependencies:
- condition: zookeeper.enabled
  name: zookeeper
  repository: file://./charts/zookeeper
- version: 10.x.x
+ version: 11.x.x
- name: common
  repository: file://./charts/common
  tags:
@@ -34,4 +34,4 @@ name: kafka
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/kafka
- https://kafka.apache.org/
-version: 19.1.5
+version: 20.0.0
```
|
|
|
kafka README — new upgrade note:
```
@@ -851,6 +851,10 @@ Find more information about how to deal with common errors related to Bitnami's

## Upgrading

### To 20.0.0

This major updates the Zookeeper subchart to its newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100).

### To 19.0.0

This major updates Kafka to its newest version, 3.3.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/33/documentation.html#upgrade).
```
|
|
|
zookeeper Chart.yaml (kafka subchart):
```
@@ -21,4 +21,4 @@ name: zookeeper
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/zookeeper
- https://zookeeper.apache.org/
-version: 10.2.5
+version: 11.0.0
```
|
|
|
@ -240,6 +240,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` |
|
||||
| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` |
|
||||
| `persistence.annotations` | Annotations for the PVC | `{}` |
|
||||
| `persistence.labels` | Labels for the PVC | `{}` |
|
||||
| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` |
|
||||
| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` |
|
||||
| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` |
|
||||
|
|
|
@ -488,16 +488,11 @@ spec:
|
|||
{{- if not .Values.persistence.existingClaim }}
|
||||
- metadata:
|
||||
name: data
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.persistence.annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- if .Values.persistence.annotations }}
|
||||
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
labels:
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }}
|
||||
{{- if .Values.persistence.labels }}
|
||||
labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
|
@ -515,16 +510,11 @@ spec:
|
|||
{{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }}
|
||||
- metadata:
|
||||
name: data-log
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.persistence.annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- if .Values.persistence.annotations }}
|
||||
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
labels:
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }}
|
||||
{{- if .Values.persistence.labels }}
|
||||
labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
|
|
|
zookeeper values.yaml — new persistence.annotations and persistence.labels parameters:
```
@@ -607,6 +607,9 @@ persistence:
  ## @param persistence.annotations Annotations for the PVC
  ##
  annotations: {}
  ## @param persistence.labels Labels for the PVC
  ##
  labels: {}
  ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC
  ## If set, the PVC can't have a PV dynamically provisioned for it
  ## E.g.
```
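These new keys let the ZooKeeper data and data-log PVC templates carry user-supplied metadata; per the statefulset change in this release they are rendered through `common.tplvalues.render`, so template expressions in the values are expanded. Illustrative sketch (the key/value pairs are hypothetical):

```
persistence:
  annotations:
    example.com/backup-policy: daily                     # hypothetical annotation
  labels:
    app.kubernetes.io/part-of: '{{ .Release.Name }}'     # templated values are rendered
```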
|
|
|
redis Chart.lock:
```
@@ -1,6 +1,6 @@
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
- version: 2.1.2
-digest: sha256:1c365a4551a2f4098e9584dc176b289c10437c679c7c3e2ec6153cabf863e1a4
-generated: "2022-11-03T05:41:14.817736977Z"
+ version: 2.2.1
+digest: sha256:6c67cfa9945bf608209d4e2ca8f17079fca4770907c7902d984187ab5b21811e
+generated: "2022-12-03T09:54:47.520083528Z"
```
|
|
|
redis Chart.yaml:
```
@@ -27,4 +27,4 @@ maintainers:
name: redis
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/redis
-version: 17.3.13
+version: 17.3.14
```
|
|
|
@ -99,7 +99,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| ------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------- |
|
||||
| `image.registry` | Redis® image registry | `docker.io` |
|
||||
| `image.repository` | Redis® image repository | `bitnami/redis` |
|
||||
| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.5-debian-11-r15` |
|
||||
| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.5-debian-11-r25` |
|
||||
| `image.digest` | Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `image.pullPolicy` | Redis® image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Redis® image pull secrets | `[]` |
|
||||
|
@ -334,7 +334,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` |
|
||||
| `sentinel.image.registry` | Redis® Sentinel image registry | `docker.io` |
|
||||
| `sentinel.image.repository` | Redis® Sentinel image repository | `bitnami/redis-sentinel` |
|
||||
| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.5-debian-11-r14` |
|
||||
| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.5-debian-11-r24` |
|
||||
| `sentinel.image.digest` | Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `sentinel.image.pullPolicy` | Redis® Sentinel image pull policy | `IfNotPresent` |
|
||||
| `sentinel.image.pullSecrets` | Redis® Sentinel image pull secrets | `[]` |
|
||||
|
@ -448,7 +448,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` |
|
||||
| `metrics.image.registry` | Redis® Exporter image registry | `docker.io` |
|
||||
| `metrics.image.repository` | Redis® Exporter image repository | `bitnami/redis-exporter` |
|
||||
| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.45.0-debian-11-r1` |
|
||||
| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.45.0-debian-11-r11` |
|
||||
| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` |
|
||||
| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` |
|
||||
|
@ -493,7 +493,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
|
||||
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
|
||||
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
|
||||
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r48` |
|
||||
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r58` |
|
||||
| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
|
||||
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
|
||||
|
@ -503,7 +503,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |
|
||||
| `sysctl.image.registry` | Bitnami Shell image registry | `docker.io` |
|
||||
| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
|
||||
| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r48` |
|
||||
| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r58` |
|
||||
| `sysctl.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
|
||||
| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
|
||||
|
|
|
common Chart.yaml:
```
@@ -1,7 +1,7 @@
annotations:
  category: Infrastructure
apiVersion: v2
-appVersion: 2.1.2
+appVersion: 2.2.1
description: A Library Helm Chart for grouping common logic between bitnami charts.
  This chart is not deployable by itself.
home: https://github.com/bitnami/charts/tree/main/bitnami/common
@@ -20,4 +20,4 @@ sources:
- https://github.com/bitnami/charts
- https://www.bitnami.com/
type: library
-version: 2.1.2
+version: 2.2.1
```
|
|
|
@ -47,6 +47,7 @@ The following table lists the helpers available in the library which are scoped
|
|||
| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
|
||||
| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
|
||||
| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
|
||||
| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` |
|
||||
|
||||
### Capabilities
|
||||
|
||||
|
@ -108,10 +109,10 @@ The following table lists the helpers available in the library which are scoped
|
|||
### Secrets
|
||||
|
||||
| Helper identifier | Description | Expected Input |
|
||||
|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
|
||||
| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
|
||||
| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. |
|
||||
| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. |
|
||||
| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` |
|
||||
|
||||
### Storage
|
||||
|
|
|
common chart affinity helpers — new common.affinities.topologyKey and a topologyKey parameter on the pod affinity presets:
```
@@ -45,9 +45,17 @@ Return a nodeAffinity definition
{{- end -}}
{{- end -}}

{{/*
Return a topologyKey definition
{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
*/}}
{{- define "common.affinities.topologyKey" -}}
{{ .topologyKey | default "kubernetes.io/hostname" -}}
{{- end -}}

{{/*
Return a soft podAffinity/podAntiAffinity definition
-{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
*/}}
{{- define "common.affinities.pods.soft" -}}
{{- $component := default "" .component -}}
@@ -62,13 +70,13 @@ preferredDuringSchedulingIgnoredDuringExecution:
          {{- range $key, $value := $extraMatchLabels }}
          {{ $key }}: {{ $value | quote }}
          {{- end }}
-     topologyKey: kubernetes.io/hostname
+     topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
      weight: 1
{{- end -}}

{{/*
Return a hard podAffinity/podAntiAffinity definition
-{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}}
*/}}
{{- define "common.affinities.pods.hard" -}}
{{- $component := default "" .component -}}
@@ -82,7 +90,7 @@ requiredDuringSchedulingIgnoredDuringExecution:
        {{- range $key, $value := $extraMatchLabels }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
-   topologyKey: kubernetes.io/hostname
+   topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
{{/*
```
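Because both pod-affinity presets now route through `common.affinities.topologyKey`, a chart that vendors common 2.2.1 can spread replicas across zones instead of hosts by passing the extra key; when it is omitted the behaviour is unchanged and falls back to `kubernetes.io/hostname`. A usage sketch from a consuming template (the component name and the nindent depth are illustrative):

```
affinity:
  podAntiAffinity:
    {{- include "common.affinities.pods.soft" (dict "component" "master" "topologyKey" "topology.kubernetes.io/zone" "context" $) | nindent 4 }}
```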
|
|
|
@ -79,7 +79,7 @@ diagnosticMode:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/redis
|
||||
tag: 7.0.5-debian-11-r15
|
||||
tag: 7.0.5-debian-11-r25
|
||||
digest: ""
|
||||
## Specify a imagePullPolicy
|
||||
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
|
||||
|
@ -976,7 +976,7 @@ sentinel:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/redis-sentinel
|
||||
tag: 7.0.5-debian-11-r14
|
||||
tag: 7.0.5-debian-11-r24
|
||||
digest: ""
|
||||
## Specify a imagePullPolicy
|
||||
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
|
||||
|
@ -1393,7 +1393,7 @@ metrics:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/redis-exporter
|
||||
tag: 1.45.0-debian-11-r1
|
||||
tag: 1.45.0-debian-11-r11
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
@ -1591,7 +1591,7 @@ volumePermissions:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/bitnami-shell
|
||||
tag: 11-debian-11-r48
|
||||
tag: 11-debian-11-r58
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
@ -1639,7 +1639,7 @@ sysctl:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/bitnami-shell
|
||||
tag: 11-debian-11-r48
|
||||
tag: 11-debian-11-r58
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
|
|
@ -40,4 +40,4 @@ name: wordpress
|
|||
sources:
|
||||
- https://github.com/bitnami/containers/tree/main/bitnami/wordpress
|
||||
- https://wordpress.org/
|
||||
version: 15.2.17
|
||||
version: 15.2.18
|
||||
|
|
|
@ -85,7 +85,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| ------------------- | --------------------------------------------------------------------------------------------------------- | -------------------- |
|
||||
| `image.registry` | WordPress image registry | `docker.io` |
|
||||
| `image.repository` | WordPress image repository | `bitnami/wordpress` |
|
||||
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.1.1-debian-11-r6` |
|
||||
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.1.1-debian-11-r8` |
|
||||
| `image.digest` | WordPress image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | WordPress image pull secrets | `[]` |
|
||||
|
@ -255,7 +255,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
|
||||
| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` |
|
||||
| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` |
|
||||
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r57` |
|
||||
| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r59` |
|
||||
| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` |
|
||||
| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` |
|
||||
|
@ -289,7 +289,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` |
|
||||
| `metrics.image.registry` | Apache exporter image registry | `docker.io` |
|
||||
| `metrics.image.repository` | Apache exporter image repository | `bitnami/apache-exporter` |
|
||||
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.11.0-debian-11-r67` |
|
||||
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `0.11.0-debian-11-r69` |
|
||||
| `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||
| `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` |
|
||||
| `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` |
|
||||
|
|
|
@ -73,7 +73,7 @@ diagnosticMode:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/wordpress
|
||||
tag: 6.1.1-debian-11-r6
|
||||
tag: 6.1.1-debian-11-r8
|
||||
digest: ""
|
||||
## Specify a imagePullPolicy
|
||||
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
|
||||
|
@ -755,7 +755,7 @@ volumePermissions:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/bitnami-shell
|
||||
tag: 11-debian-11-r57
|
||||
tag: 11-debian-11-r59
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
@ -849,7 +849,7 @@ metrics:
|
|||
image:
|
||||
registry: docker.io
|
||||
repository: bitnami/apache-exporter
|
||||
tag: 0.11.0-debian-11-r67
|
||||
tag: 0.11.0-debian-11-r69
|
||||
digest: ""
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
|
@ -25,4 +25,4 @@ name: zookeeper
|
|||
sources:
|
||||
- https://github.com/bitnami/containers/tree/main/bitnami/zookeeper
|
||||
- https://zookeeper.apache.org/
|
||||
version: 10.2.5
|
||||
version: 11.0.0
|
||||
|
|
|
@ -240,6 +240,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||
| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` |
|
||||
| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` |
|
||||
| `persistence.annotations` | Annotations for the PVC | `{}` |
|
||||
| `persistence.labels` | Labels for the PVC | `{}` |
|
||||
| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` |
|
||||
| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` |
|
||||
| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` |
|
||||
|
|
|
@ -488,16 +488,11 @@ spec:
|
|||
{{- if not .Values.persistence.existingClaim }}
|
||||
- metadata:
|
||||
name: data
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.persistence.annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- if .Values.persistence.annotations }}
|
||||
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
labels:
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }}
|
||||
{{- if .Values.persistence.labels }}
|
||||
labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
|
@ -515,16 +510,11 @@ spec:
|
|||
{{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }}
|
||||
- metadata:
|
||||
name: data-log
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.persistence.annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- if .Values.persistence.annotations }}
|
||||
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonAnnotations }}
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.commonLabels }}
|
||||
labels:
|
||||
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }}
|
||||
{{- if .Values.persistence.labels }}
|
||||
labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
|
|
|
@ -607,6 +607,9 @@ persistence:
|
|||
## @param persistence.annotations Annotations for the PVC
|
||||
##
|
||||
annotations: {}
|
||||
## @param persistence.labels Labels for the PVC
|
||||
##
|
||||
labels: {}
|
||||
## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC
|
||||
## If set, the PVC can't have a PV dynamically provisioned for it
|
||||
## E.g.
|
||||
|
|
|
@ -4,7 +4,7 @@ annotations:
|
|||
catalog.cattle.io/kube-version: '>=v1.16.0-0'
|
||||
catalog.cattle.io/release-name: citrix-cpx-with-ingress-controller
|
||||
apiVersion: v2
|
||||
appVersion: 1.27.15
|
||||
appVersion: 1.28.2
|
||||
description: A Helm chart for Citrix ADC CPX with Citrix ingress Controller running
|
||||
as sidecar.
|
||||
home: https://www.citrix.com
|
||||
|
@ -18,4 +18,4 @@ maintainers:
|
|||
name: citrix-cpx-with-ingress-controller
|
||||
sources:
|
||||
- https://github.com/citrix/citrix-k8s-ingress-controller
|
||||
version: 1.27.15
|
||||
version: 1.28.2
|
||||
|
|
|
@ -455,10 +455,11 @@ The following table lists the configurable parameters of the Citrix ADC CPX with
|
|||
| daemonSet | Optional | False | Set this to true if Citrix ADC CPX needs to be deployed as DaemonSet. |
|
||||
| cic.imageRegistry | Mandatory | `quay.io` | The Citrix ingress controller image registry |
|
||||
| cic.imageRepository | Mandatory | `citrix/citrix-k8s-ingress-controller` | The Citrix ingress controller image repository |
|
||||
| cic.imageTag | Mandatory | `1.27.15` | The Citrix ingress controller image tag |
|
||||
| cic.imageTag | Mandatory | `1.28.2` | The Citrix ingress controller image tag |
|
||||
| cic.pullPolicy | Mandatory | IfNotPresent | The Citrix ingress controller image pull policy. |
|
||||
| cic.required | Mandatory | true | CIC to be run as sidecar with Citrix ADC CPX |
|
||||
| cic.resources | Optional | {} | CPU/Memory resource requests/limits for Citrix Ingress Controller container |
|
||||
| cic.rbacRole | Optional | false | To deploy CIC with RBAC Role set rbacRole=true; by default CIC gets installed with RBAC ClusterRole(rbacRole=false)) |
|
||||
| imagePullSecrets | Optional | N/A | Provide list of Kubernetes secrets to be used for pulling the images from a private Docker registry or repository. For more information on how to create this secret please see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). |
|
||||
| nameOverride | Optional | N/A | String to partially override deployment fullname template with a string (will prepend the release name) |
|
||||
| fullNameOverride | Optional | N/A | String to fully override deployment fullname template with a string |
|
||||
|
|
|
@ -143,6 +143,10 @@ spec:
|
|||
{{- else }}
|
||||
- name: "NS_IP"
|
||||
value: "127.0.0.1"
|
||||
{{- end }}
|
||||
{{- if .Values.rbacRole }}
|
||||
- name: "SCOPE"
|
||||
value: "local"
|
||||
{{- end }}
|
||||
- name: "NS_APPS_NAME_PREFIX"
|
||||
value: {{ .Values.entityPrefix | default "k8s"}}
|
||||
|
|
|
@ -1,7 +1,14 @@
|
|||
{{- if not .Values.rbacRole }}
|
||||
kind: ClusterRole
|
||||
{{- else }}
|
||||
kind: Role
|
||||
{{- end }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "citrix-cpx-ingress-controller.serviceAccountName" . }}
|
||||
{{- if .Values.rbacRole }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
{{- if .Values.openshift }}
|
||||
|
@ -59,13 +66,24 @@ rules:
|
|||
|
||||
---
|
||||
|
||||
{{- if not .Values.rbacRole }}
|
||||
kind: ClusterRoleBinding
|
||||
{{- else }}
|
||||
kind: RoleBinding
|
||||
{{- end }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "citrix-cpx-ingress-controller.serviceAccountName" . }}
|
||||
{{- if .Values.rbacRole }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- if not .Values.rbacRole }}
|
||||
kind: ClusterRole
|
||||
{{- else }}
|
||||
kind: Role
|
||||
{{- end }}
|
||||
name: {{ include "citrix-cpx-ingress-controller.serviceAccountName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
|
@ -86,4 +104,3 @@ imagePullSecrets:
|
|||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
---
|
||||
|
|
|
@ -45,6 +45,10 @@ ingressIP:
|
|||
# If IPAM controller is used for auto allocation of the external IP for service of type LoadBalancer, set this option to true
|
||||
ipam: False
|
||||
|
||||
# Enable RBAC role (so called local role), by default CIC deployed with ClusterRole.
|
||||
# below variable to deploy CIC with RBAC role, only ingress service supported with this config
|
||||
rbacRole: False
|
||||
|
||||
# API server Cert verification can be disabled, while communicating with API Server, if disableAPIServerCertVerify set to True
|
||||
disableAPIServerCertVerify: False
|
||||
|
||||
|
@ -78,7 +82,7 @@ servicePorts: []
|
|||
cic:
|
||||
imageRegistry: quay.io
|
||||
imageRepository: citrix/citrix-k8s-ingress-controller
|
||||
imageTag: 1.27.15
|
||||
imageTag: 1.28.2
|
||||
image: "{{ .Values.cic.imageRegistry }}/{{ .Values.cic.imageRepository }}:{{ .Values.cic.imageTag }}"
|
||||
pullPolicy: IfNotPresent
|
||||
required: true
|
||||
|
|
|
@ -4,7 +4,7 @@ annotations:
|
|||
catalog.cattle.io/kube-version: '>=v1.16.0-0'
|
||||
catalog.cattle.io/release-name: citrix-ingress-controller
|
||||
apiVersion: v2
|
||||
appVersion: 1.27.15
|
||||
appVersion: 1.28.2
|
||||
description: A Helm chart for Citrix Ingress Controller configuring MPX/VPX.
|
||||
home: https://www.citrix.com
|
||||
icon: https://raw.githubusercontent.com/citrix/citrix-helm-charts/gh-pages/icon.png
|
||||
|
@ -17,4 +17,4 @@ maintainers:
|
|||
name: citrix-ingress-controller
|
||||
sources:
|
||||
- https://github.com/citrix/citrix-k8s-ingress-controller
|
||||
version: 1.27.15
|
||||
version: 1.28.2
|
||||
|
|
|
@ -316,7 +316,7 @@ The following table lists the mandatory and optional parameters that you can con
|
|||
| license.accept | Mandatory | no | Set `yes` to accept the CIC end user license agreement. |
|
||||
| imageRegistry | Mandatory | `quay.io` | The Citrix ingress controller image registry |
|
||||
| imageRepository | Mandatory | `citrix/citrix-k8s-ingress-controller` | The Citrix ingress controller image repository |
|
||||
| imageTag | Mandatory | `1.27.15` | The Citrix ingress controller image tag |
|
||||
| imageTag | Mandatory | `1.28.2` | The Citrix ingress controller image tag |
|
||||
| pullPolicy | Mandatory | IfNotPresent | The CIC image pull policy. |
|
||||
| imagePullSecrets | Optional | N/A | Provide list of Kubernetes secrets to be used for pulling the images from a private Docker registry or repository. For more information on how to create this secret please see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). |
|
||||
| nameOverride | Optional | N/A | String to partially override deployment fullname template with a string (will prepend the release name) |
|
||||
|
@ -392,6 +392,7 @@ The following table lists the mandatory and optional parameters that you can con
|
|||
| nsLbHashAlgo.hashAlgorithm | Optional | 'default' | Specifies the supported algorithm. Supported algorithms are "default", "jarh", "prac", Default value is 'default' |
|
||||
| extraVolumeMounts | Optional | [] | Specify the Additional VolumeMounts to be mounted in CIC container |
|
||||
| extraVolumes | Optional | [] | Specify the Additional Volumes for additional volumeMounts |
|
||||
| rbacRole | Optional | false | To deploy CIC with RBAC Role set rbacRole=true; by default CIC gets installed with RBAC ClusterRole(rbacRole=false)) |
|
||||
|
||||
Alternatively, you can define a YAML file with the values for the parameters and pass the values while installing the chart.
|
||||
|
||||
|
|
|
@ -89,6 +89,10 @@ spec:
- name: "NS_VIP"
value: "{{ .Values.nsVIP }}"
{{- end }}
{{- if .Values.rbacRole }}
- name: "SCOPE"
value: "local"
{{- end }}
{{- if .Values.nitroReadTimeout }}
- name: "NS_NITRO_READ_TIMEOUT"
value: "{{ .Values.nitroReadTimeout }}"

@ -1,7 +1,14 @@
{{- if not .Values.rbacRole }}
kind: ClusterRole
{{- else }}
kind: Role
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "citrix-ingress-controller.serviceAccountName" . }}
{{- if .Values.rbacRole }}
namespace: {{ .Release.Namespace }}
{{- end }}
rules:
- apiGroups: [""]
{{- if .Values.openshift }}
@ -59,13 +66,24 @@ rules:

---

{{- if not .Values.rbacRole }}
kind: ClusterRoleBinding
{{- else }}
kind: RoleBinding
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "citrix-ingress-controller.serviceAccountName" . }}
{{- if .Values.rbacRole }}
namespace: {{ .Release.Namespace }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
{{- if not .Values.rbacRole }}
kind: ClusterRole
{{- else }}
kind: Role
{{- end }}
name: {{ include "citrix-ingress-controller.serviceAccountName" . }}
subjects:
- kind: ServiceAccount
@ -86,4 +104,3 @@ imagePullSecrets:
{{- end }}
{{- end }}

---

@ -5,7 +5,7 @@
# Citrix Ingress Controller config details
imageRegistry: quay.io
imageRepository: citrix/citrix-k8s-ingress-controller
imageTag: 1.27.15
imageTag: 1.28.2
image: "{{ .Values.imageRegistry }}/{{ .Values.imageRepository }}:{{ .Values.imageTag }}"
pullPolicy: IfNotPresent
imagePullSecrets: []
@ -103,6 +103,10 @@ crds:
install: false
retainOnDelete: false

# Enable RBAC role (so called local role), by default CIC deployed with ClusterRole.
# below variable to deploy CIC with RBAC role, only ingress service supported with this config
rbacRole: False

# Config required to be done by Citrix Ingress Controller for sending metrics to Citrix Observability Exporter
analyticsConfig:
required: false
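To see the effect of the `rbacRole` toggle above together with the Role/ClusterRole template shown earlier, a quick sketch; the release name `cic` and the `citrix` repo alias are placeholders, and other mandatory values may also be required:

```
# switch an existing release to the namespaced Role/RoleBinding
helm upgrade cic citrix/citrix-ingress-controller --reuse-values --set rbacRole=true

# or render locally and confirm which kind is emitted
helm template cic citrix/citrix-ingress-controller --set license.accept=yes --set rbacRole=true | grep -E '^kind: (Cluster)?Role'
```
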
@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.8-0'
catalog.cattle.io/release-name: cockroachdb
apiVersion: v1
appVersion: 22.1.11
appVersion: 22.2.0
description: CockroachDB is a scalable, survivable, strongly-consistent SQL database.
home: https://www.cockroachlabs.com
icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png
@ -14,4 +14,4 @@ maintainers:
name: cockroachdb
sources:
- https://github.com/cockroachdb/cockroach
version: 9.1.1
version: 10.0.0

@ -229,10 +229,10 @@ kubectl get pods \
```

```
my-release-cockroachdb-0 cockroachdb/cockroach:v22.1.11
my-release-cockroachdb-1 cockroachdb/cockroach:v22.1.11
my-release-cockroachdb-2 cockroachdb/cockroach:v22.1.11
my-release-cockroachdb-3 cockroachdb/cockroach:v22.1.11
my-release-cockroachdb-0 cockroachdb/cockroach:v22.2.0
my-release-cockroachdb-1 cockroachdb/cockroach:v22.2.0
my-release-cockroachdb-2 cockroachdb/cockroach:v22.2.0
my-release-cockroachdb-3 cockroachdb/cockroach:v22.2.0
```

Resume normal operations. Once you are comfortable that the stability and performance of the cluster is what you'd expect post-upgrade, finalize the upgrade:
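The finalization commands themselves fall outside this hunk; as a hedged sketch only (the secure client pod name, certs directory, and `my-release` prefix are assumptions based on the chart's defaults), finalizing usually amounts to re-enabling auto-finalization from a SQL shell:

```
kubectl exec -it my-release-cockroachdb-client-secure -- \
  ./cockroach sql --certs-dir=/cockroach-certs --host=my-release-cockroachdb-public \
  -e "RESET CLUSTER SETTING cluster.preserve_downgrade_option;"
```
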
@ -287,7 +287,7 @@ Verify that no pod is deleted and then upgrade as normal. A new StatefulSet will

For more information about upgrading a cluster to the latest major release of CockroachDB, see [Upgrade to CockroachDB v21.1](https://www.cockroachlabs.com/docs/stable/upgrade-cockroach-version.html).

Note that there are some backward-incompatible changes to SQL features between versions 20.2 and 21.1. For details, see the [CockroachDB v22.1.11 release notes](https://www.cockroachlabs.com/docs/releases/v22.1.11.html#backward-incompatible-changes).
Note that there are some backward-incompatible changes to SQL features between versions 20.2 and 21.1. For details, see the [CockroachDB v22.2.0 release notes](https://www.cockroachlabs.com/docs/releases/v22.2.0.html#backward-incompatible-changes).

## Configuration

@ -316,7 +316,7 @@ For details see the [`values.yaml`](values.yaml) file.
| `conf.store.size` | CockroachDB storage size | `""` |
| `conf.store.attrs` | CockroachDB storage attributes | `""` |
| `image.repository` | Container image name | `cockroachdb/cockroach` |
| `image.tag` | Container image tag | `v22.1.11` |
| `image.tag` | Container image tag | `v22.2.0` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` |
| `statefulset.replicas` | StatefulSet replicas number | `3` |

@ -1,7 +1,7 @@
# Generated file, DO NOT EDIT. Source: build/templates/values.yaml
image:
repository: cockroachdb/cockroach
tag: v22.1.11
tag: v22.2.0
pullPolicy: IfNotPresent
credentials: {}
# registry: docker.io
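Given the documented `image.tag` default above, an existing release can be pointed at the new image explicitly; the `my-release` name and `cockroachdb` repo alias are placeholders:

```
helm repo update
helm upgrade my-release cockroachdb/cockroachdb --reuse-values --set image.tag=v22.2.0
```
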
@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@ -4,9 +4,9 @@ annotations:
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: cf-runtime
apiVersion: v2
appVersion: 1.7.8
appVersion: 1.8.0
description: A Helm chart for Codefresh Runner
icon: https://partner-charts.rancher.io/assets/logos/codefresh.jpg
name: cf-runtime
type: application
version: 1.7.8
version: 1.8.0

@ -24,7 +24,7 @@ dockerRegistry: "quay.io" # Registry prefix for the runtime images (default quay
newRelicLicense: "" # NEWRELIC_LICENSE_KEY (for app-proxy and runner deployments)

runner: # Runner Deployment
image: "codefresh/venona:1.7.8"
image: "codefresh/venona:1.8.0"
env: {}
## e.g:
# env:

@ -1,5 +1,13 @@
# Datadog changelog

## 3.5.1

* Removing default value placeholder for the API Key in the values.yaml.

## 3.5.0

* Remove runtime compilation-related config values `enableKernelHeaderDownload` and `enableRuntimeCompiler` in the system-probe.

## 3.4.0

* Add `datadog.systemProbe.btfPath` for mounting user-provided BTF files (see datadog-agent PRs #13962 and #14096 for more context).

@ -19,4 +19,4 @@ name: datadog
sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-agent
version: 3.4.0
version: 3.5.1

@ -1,6 +1,6 @@
# Datadog

![Version: 3.4.0](https://img.shields.io/badge/Version-3.4.0-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
![Version: 3.5.1](https://img.shields.io/badge/Version-3.5.1-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)

[Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/).

@ -570,7 +570,7 @@ helm install <RELEASE_NAME> \
| clusterChecksRunner.volumes | list | `[]` | Specify additional volumes to mount in the cluster checks container |
| commonLabels | object | `{}` | Labels to apply to all resources |
| datadog-crds.crds.datadogMetrics | bool | `true` | Set to true to deploy the DatadogMetrics CRD |
| datadog.apiKey | string | `"<DATADOG_API_KEY>"` | Your Datadog API key |
| datadog.apiKey | string | `nil` | Your Datadog API key |
| datadog.apiKeyExistingSecret | string | `nil` | Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. |
| datadog.apm.enabled | bool | `false` | Enable this to enable APM and tracing, on port 8126 DEPRECATED. Use datadog.apm.portEnabled instead |
| datadog.apm.hostSocketPath | string | `"/var/run/datadog/"` | Host path to the trace-agent socket |
@ -694,9 +694,7 @@ helm install <RELEASE_NAME> \
| datadog.systemProbe.enableConntrack | bool | `true` | Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data |
| datadog.systemProbe.enableDefaultKernelHeadersPaths | bool | `true` | Enable mount of default paths where kernel headers are stored |
| datadog.systemProbe.enableDefaultOsReleasePaths | bool | `true` | enable default os-release files mount |
| datadog.systemProbe.enableKernelHeaderDownload | bool | `true` | Enable the downloading of kernel headers for runtime compilation of eBPF probes |
| datadog.systemProbe.enableOOMKill | bool | `false` | Enable the OOM kill eBPF-based check |
| datadog.systemProbe.enableRuntimeCompiler | bool | `false` | Enable the runtime compiler for eBPF probes |
| datadog.systemProbe.enableTCPQueueLength | bool | `false` | Enable the TCP queue length eBPF-based check |
| datadog.systemProbe.maxTrackedConnections | int | `131072` | the maximum number of tracked connections |
| datadog.systemProbe.mountPackageManagementDirs | list | `[]` | Enables mounting of specific package management directories when runtime compilation is enabled |

@ -137,6 +137,16 @@ The option `datadog.apm.socketEnabled` is enabled by default and can be used to

{{- end }}

{{- if or .Values.datadog.systemProbe.enableKernelHeaderDownload .Values.datadog.systemProbe.enableRuntimeCompiler }}

#################################################################
####               WARNING: Deprecation notice               ####
#################################################################

The `enableKernelHeaderDownload` and `enableRuntimeCompiler` options are not supported anymore, in order to enable the runtime compiler, set the environment variable `DD_ENABLE_KERNEL_HEADER_DOWNLOAD` and `DD_ENABLE_RUNTIME_COMPILER` in the system probe.

{{- end }}

{{- if .Values.datadog.apm.useSocketVolume }}

#################################################################
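The deprecation notice above does not show where those environment variables are set; one plausible spot (an assumption about this chart's values layout, not part of the diff) is the system-probe container's env list:

```
# values.yaml sketch -- assumes agents.containers.systemProbe.env is available in this chart version
agents:
  containers:
    systemProbe:
      env:
        - name: DD_ENABLE_KERNEL_HEADER_DOWNLOAD
          value: "true"
        - name: DD_ENABLE_RUNTIME_COMPILER
          value: "true"
```
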
@ -308,7 +318,7 @@ are enabled:
* Failure policy of the Admission Controller is set to "Fail"
{{- end }}

To run in high availability mode, our recommandation is to update the chart
To run in high availability mode, our recommendation is to update the chart
configuration with:
* set `clusterAgent.replicas` value to `2` replicas .
* set `clusterAgent.createPodDisruptionBudget` to `true`.

@ -619,7 +619,7 @@ Return the local service name
Return true if runtime compilation is enabled in the system-probe
*/}}
{{- define "runtime-compilation-enabled" -}}
{{- if or .Values.datadog.systemProbe.enableTCPQueueLength .Values.datadog.systemProbe.enableOOMKill .Values.datadog.systemProbe.enableRuntimeCompiler -}}
{{- if or .Values.datadog.systemProbe.enableTCPQueueLength .Values.datadog.systemProbe.enableOOMKill .Values.datadog.serviceMonitoring.enabled -}}
true
{{- else -}}
false

@ -32,8 +32,6 @@ data:
collect_dns_stats: {{ $.Values.datadog.systemProbe.collectDNSStats }}
max_tracked_connections: {{ $.Values.datadog.systemProbe.maxTrackedConnections }}
conntrack_max_state_size: {{ $.Values.datadog.systemProbe.conntrackMaxStateSize }}
enable_runtime_compiler: {{ $.Values.datadog.systemProbe.enableRuntimeCompiler }}
enable_kernel_header_download: {{ $.Values.datadog.systemProbe.enableKernelHeaderDownload }}
runtime_compiler_output_dir: {{ $.Values.datadog.systemProbe.runtimeCompilationAssetDir }}/build
kernel_header_download_dir: {{ $.Values.datadog.systemProbe.runtimeCompilationAssetDir }}/kernel-headers
apt_config_dir: /host/etc/apt

@ -30,7 +30,7 @@ datadog:
# datadog.apiKey -- Your Datadog API key

## ref: https://app.datadoghq.com/account/settings#agent/kubernetes
apiKey: <DATADOG_API_KEY>
apiKey: # <DATADOG_API_KEY>

# datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret.
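With the placeholder default removed, the key has to be supplied explicitly at install time; a sketch of the two documented routes (release name, `datadog/datadog` repo alias, and secret name are placeholders):

```
# Option 1: pass the key directly
helm install <RELEASE_NAME> datadog/datadog --set datadog.apiKey=<DATADOG_API_KEY>

# Option 2: point the chart at an existing Secret holding the key under "api-key"
kubectl create secret generic datadog-secret --from-literal api-key=<DATADOG_API_KEY>
helm install <RELEASE_NAME> datadog/datadog --set datadog.apiKeyExistingSecret=datadog-secret
```
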
@ -511,12 +511,6 @@ datadog:
# datadog.systemProbe.enableOOMKill -- Enable the OOM kill eBPF-based check
enableOOMKill: false

# datadog.systemProbe.enableRuntimeCompiler -- Enable the runtime compiler for eBPF probes
enableRuntimeCompiler: false

# datadog.systemProbe.enableKernelHeaderDownload -- Enable the downloading of kernel headers for runtime compilation of eBPF probes
enableKernelHeaderDownload: true

# datadog.systemProbe.mountPackageManagementDirs -- Enables mounting of specific package management directories when runtime compilation is enabled
mountPackageManagementDirs: []
## For runtime compilation to be able to download kernel headers, the host's package management folders

@ -2,6 +2,10 @@
documentation](doc/development/changelog.md) for instructions on adding your own
entry.

## 6.6.2 (2022-12-05)

No changes.

## 6.6.1 (2022-11-30)

No changes.

@ -3,7 +3,7 @@ annotations:
catalog.cattle.io/display-name: GitLab
catalog.cattle.io/release-name: gitlab
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: The One DevOps Platform
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.png
@ -15,4 +15,4 @@ maintainers:
name: gitlab
sources:
- https://gitlab.com/gitlab-org/charts/gitlab
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: GitLab Geo logcursor
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.svg
@ -12,4 +12,4 @@ name: geo-logcursor
sources:
- https://gitlab.com/charts/gitlab/tree/master/charts/gitlab/charts/geo-logcursor
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-rails
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: Git RPC service for handling all the git calls made by GitLab
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.svg
@ -13,4 +13,4 @@ name: gitaly
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/gitaly
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitaly
version: 6.6.1
version: 6.6.2

@ -14,4 +14,4 @@ sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/gitlab-exporter
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-exporter
- https://gitlab.com/gitlab-org/gitlab-exporter
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: Adapt the Grafana chart to interface to the GitLab App
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.svg
@ -13,4 +13,4 @@ name: gitlab-grafana
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/gitlab-grafana
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-grafana
version: 6.6.1
version: 6.6.2

@ -14,4 +14,4 @@ sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/gitlab-pages
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-pages
- https://gitlab.com/gitlab-org/gitlab-pages
version: 6.6.1
version: 6.6.2

@ -14,4 +14,4 @@ name: gitlab-shell
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/gitlab-shell
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-shell
version: 6.6.1
version: 6.6.2

@ -17,4 +17,4 @@ name: kas
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/gitlab-kas
- https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent
version: 6.6.1
version: 6.6.2

@ -13,4 +13,4 @@ name: mailroom
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/mailroom
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-mailroom
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: Database migrations and other versioning tasks for upgrading Gitlab
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.svg
@ -12,4 +12,4 @@ name: migrations
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/migrations
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-rails
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: Praefect is a router and transaction manager for Gitaly, and a required
component for running a Gitaly Cluster.
home: https://about.gitlab.com/
@ -16,4 +16,4 @@ sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/praefect
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitaly
- https://gitlab.com/gitlab-org/gitaly/-/tree/master/cmd/praefect
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: Gitlab Sidekiq for asynchronous task processing in rails
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.svg
@ -13,4 +13,4 @@ name: sidekiq
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/sidekiq
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-sidekiq
version: 6.6.1
version: 6.6.2

@ -14,4 +14,4 @@ name: spamcheck
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/spamcheck
- https://gitlab.com/gitlab-org/spamcheck
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: For manually running rake tasks through kubectl
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.svg
@ -13,4 +13,4 @@ name: toolbox
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/toolbox
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-toolbox
version: 6.6.1
version: 6.6.2

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: 15.6.1
appVersion: 15.6.2
description: HTTP server for Gitlab
home: https://about.gitlab.com/
icon: https://gitlab.com/gitlab-com/gitlab-artwork/raw/master/logo/logo-square.svg
@ -14,4 +14,4 @@ name: webservice
sources:
- https://gitlab.com/gitlab-org/charts/gitlab/tree/master/charts/gitlab/charts/webservice
- https://gitlab.com/gitlab-org/build/CNG/tree/master/gitlab-webservice
version: 6.6.1
version: 6.6.2

Some files were not shown because too many files have changed in this diff.