Merge pull request #779 from nflondo/main-source

Charts CI
pull/782/head
atrendafilov 2023-06-06 20:56:07 +03:00 committed by GitHub
commit 585c47c337
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
60 changed files with 2119 additions and 334 deletions

Binary file not shown. (9 files)

View File

@@ -1,7 +1,9 @@
 annotations:
   artifacthub.io/changes: |
     - kind: changed
-      description: Upgrade supported Kubernetes version to 1.23.0 due to Amazon EKS EoL
+      description: Upgrade Argo CD to v2.7.4
+    - kind: added
+      description: Update knownHosts
   artifacthub.io/signKey: |
     fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252
     url: https://argoproj.github.io/argo-helm/pgp_keys.asc
@@ -10,7 +12,7 @@ annotations:
   catalog.cattle.io/kube-version: '>=1.23.0-0'
   catalog.cattle.io/release-name: argo-cd
 apiVersion: v2
-appVersion: v2.7.3
+appVersion: v2.7.4
 dependencies:
 - condition: redis-ha.enabled
   name: redis-ha
@@ -32,4 +34,4 @@ name: argo-cd
 sources:
 - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
 - https://github.com/argoproj/argo-cd
-version: 5.35.0
+version: 5.35.1

View File

@@ -315,8 +315,11 @@ configs:
     # -- Known hosts to be added to the known host list by default.
     # @default -- See [values.yaml]
     knownHosts: |
-      bitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO
+      [ssh.github.com]:443 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
+      [ssh.github.com]:443 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
+      [ssh.github.com]:443 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=
       bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE=
+      bitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO
       bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
       github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
       github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
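The hunk above swaps in GitHub's `[ssh.github.com]:443` host keys and re-sorts the Bitbucket entries. If you maintain your own `configs.ssh.knownHosts` override, the entries can be re-fetched and fingerprint-checked with stock OpenSSH tooling; a minimal sketch (compare the fingerprints against GitHub's and Bitbucket's published values before trusting them):

```console
# Fetch the SSH-over-443 host keys that the new default trusts
ssh-keyscan -p 443 -t ecdsa,ed25519,rsa ssh.github.com 2>/dev/null

# Print a fingerprint for manual comparison with the vendor's published list
ssh-keyscan -t ed25519 github.com 2>/dev/null | ssh-keygen -lf -
```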

View File

@ -58,4 +58,4 @@ maintainers:
url: https://github.com/asserts url: https://github.com/asserts
name: asserts name: asserts
type: application type: application
version: 1.40.0 version: 1.41.0

View File

@@ -142,7 +142,7 @@ server:
     repository: asserts/asserts-server
     pullPolicy: IfNotPresent
     ## Overrides the image tag whose default is the chart appVersion.
-    tag: v0.2.627
+    tag: v0.2.631
   resources:
     requests:
@@ -251,7 +251,7 @@ authorization:
     repository: asserts/authorization
     pullPolicy: IfNotPresent
     ## Overrides the image tag whose default is the chart appVersion.
-    tag: v0.2.627
+    tag: v0.2.631
   resources:
     requests:
@@ -317,7 +317,7 @@ ui:
     repository: asserts/asserts-ui
     pullPolicy: IfNotPresent
     ## Overrides the image tag whose default is the chart appVersion.
-    tag: v0.1.1216
+    tag: v0.1.1224
   imagePullSecrets: []
@@ -884,6 +884,9 @@ alertmanager:
       - ReadWriteOnce
     size: 100Mi
+  extraArgs:
+    cluster.listen-address: null
+
   existingConfigMap: asserts-alertmanager
   configmapReload:
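The new `alertmanager.extraArgs` entry uses a `null` value, which — assuming the bundled Alertmanager chart renders `extraArgs` as `--key=value` flags and skips null entries — leaves `--cluster.listen-address` unset, and Alertmanager with an empty cluster listen address runs with HA clustering off. A sketch of carrying the same override in a local values file; the `asserts/asserts` chart reference and release name are assumptions:

```console
cat > asserts-overrides.yaml <<'EOF'
alertmanager:
  extraArgs:
    cluster.listen-address: null   # null is expected to drop the flag entirely
EOF
helm upgrade asserts asserts/asserts --reuse-values -f asserts-overrides.yaml
```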

View File

@@ -5,7 +5,7 @@ annotations:
   catalog.cattle.io/namespace: kubeslice-controller
   catalog.cattle.io/release-name: kubeslice-controller
 apiVersion: v2
-appVersion: 0.10.0
+appVersion: 1.0.0
 description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking
   tool for efficient, secure, policy-enforced connectivity and true multi-tenancy
   capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure
@@ -34,6 +34,9 @@ keywords:
 - infrastructure
 - application
 kubeVersion: '>= 1.19.0-0'
+maintainers:
+- email: support@avesha.io
+  name: Avesha
 name: kubeslice-controller
 type: application
-version: 0.10.0
+version: 1.0.0

View File

@ -1,13 +1,13 @@
@@ -1,13 +1,13 @@
 # Kubeslice Enterprise Controller Helm Charts
 ## Prerequisites
-📖 Follow the overview and registration [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/)
+📖 Follow the overview and registration [documentation](https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/)
-- Create and configure the controller cluster following instructions in the prerequisites section [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher)
+- Create and configure the controller cluster following instructions in the prerequisites section [documentation](https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher)
 - Copy the chart version from the upper right hand section of this page [VERSION parameter needed during install and upgrade]
 - Click on the download chart link from the upper right hand section of this page, save it to location available from command prompt
 - Untar the chart to get the values.yaml file, update values.yaml with the following information
-  - cluster end point [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher#getting-the-controller-cluster-endpoint)
+  - cluster end point [documentation](https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher#getting-the-controller-cluster-endpoint)
   - helm repository username, password and email [From registration]
@@ -32,7 +32,7 @@ helm upgrade --history-max=5 --namespace=kubeslice-controller kubeslice-controll
 ```
 ### Uninstall KubeSlice Controller
-- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/0.5.0/getting-started-with-cloud-clusters/uninstalling-kubeslice/uninstalling-the-kubeslice-controller/)
+- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/1.0.0/getting-started-with-cloud-clusters/uninstalling-kubeslice/uninstalling-the-kubeslice-controller/)
 ```console
 export KUBECONFIG=<CONTROLLER CLUSTER KUBECONFIG>

View File

@@ -2,7 +2,7 @@
 questions:
   -
     default: ""
-    description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/#registering-to-access-the-enterprise-helm-chart"
+    description: "https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/#registering-to-access-the-enterprise-helm-chart"
     group: "Global Settings"
     label: "Registered Username"
     required: true
@@ -18,7 +18,7 @@ questions:
     variable: imagePullSecrets.password
   -
     default: ""
-    description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher/#getting-the-controller-cluster-endpoint"
+    description: "https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher/#getting-the-controller-cluster-endpoint"
     group: "Controller Settings"
     label: "Controller Endpoint"
     required: true
@@ -48,3 +48,11 @@ questions:
     required: true
     type: enum
     variable: kubeslice.uiproxy.service.type
+  -
+    default: ""
+    description: "https://docs.avesha.io/documentation/enterprise/1.0.0/reference/configuration-parameters/#license-parameters"
+    group: "Controller Settings"
+    label: "Customer Name for generating Trial License"
+    required: false
+    type: string
+    variable: kubeslice.license.customerName
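The new question maps to `kubeslice.license.customerName` in values. Outside Rancher's question UI the same parameter can be passed on the command line; a sketch assuming the chart is installed from a repo alias `kubeslice` (repo alias and release name are assumptions, not from this diff):

```console
helm install kubeslice-controller kubeslice/kubeslice-controller \
  --namespace kubeslice-controller --create-namespace \
  --set kubeslice.license.mode=auto \
  --set kubeslice.license.type=kubeslice-trial-license \
  --set kubeslice.license.customerName="Example Corp"
```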

View File

@@ -39,6 +39,7 @@ webhooks:
     operations:
     - CREATE
     - UPDATE
+    - DELETE
     resources:
     - slicenodeaffinities
     sideEffects: None
@@ -170,6 +171,7 @@ webhooks:
     operations:
     - CREATE
     - UPDATE
+    - DELETE
     resources:
     - serviceexportconfigs
     sideEffects: None

View File

@@ -42,6 +42,10 @@ spec:
         - --controller-end-point={{ required "A valid value is required!" .Values.kubeslice.controller.endpoint }}
         - --prometheus-service-endpoint={{ required "A valid value is required!" .Values.kubeslice.prometheus.url}}
         - --ovpn-job-image={{ .Values.kubeslice.ovpnJob.image }}:{{ .Values.kubeslice.ovpnJob.tag }}
+        - --license-mode={{ .Values.kubeslice.license.mode }}
+        - --license-customer-name={{ .Values.kubeslice.license.customerName }}
+        - --license-type={{.Values.kubeslice.license.type }}
+        - --license-image={{ .Values.kubeslice.controller.image }}:{{ .Values.kubeslice.controller.tag }}
         command:
         - /manager
         env:
@@ -83,11 +87,17 @@ spec:
           readOnly: true
         - name: kubeslice-controller-event-schema-conf
          mountPath: /events/event-schema/
+        - name: kubeslice-controller-license-conf
+          mountPath: /etc/license/config
       securityContext:
         runAsNonRoot: true
       serviceAccountName: kubeslice-controller-controller-manager
       terminationGracePeriodSeconds: 10
       volumes:
+      - name: kubeslice-controller-license-conf
+        configMap:
+          name: kubeslice-controller-license-config
+          defaultMode: 420
      - name: kubeslice-controller-event-schema-conf
        configMap:
          name: kubeslice-controller-event-schema-conf
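To confirm the new license flags and the ConfigMap mount render as expected before installing, `helm template` against the unpacked chart works; a sketch (the endpoint and customer name are placeholder values chosen only to satisfy the chart's `required` checks):

```console
# Render locally and grep for the new license wiring
helm template kubeslice-controller ./kubeslice-controller \
  --set kubeslice.controller.endpoint=https://203.0.113.10:6443 \
  --set kubeslice.license.customerName="Example Corp" \
  | grep -E -- '--license-|license-conf'
```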

View File

@@ -3,24 +3,33 @@ kind: Role
 metadata:
   name: kubeslice-api-gw
 rules:
-  - verbs:
-      - get
-      - list
-    apiGroups:
-      - controller.kubeslice.io
-      - worker.kubeslice.io
-    resources:
-      - projects
-      - clusters
-  - verbs:
-      - get
-      - list
-    apiGroups:
-      - ""
-      - events.k8s.io/v1
-    resources:
-      - secrets
-      - events
+  - apiGroups:
+      - controller.kubeslice.io
+      - worker.kubeslice.io
+    resources:
+      - projects
+      - clusters
+    verbs:
+      - get
+      - list
+  - apiGroups:
+      - ""
+      - batch
+      - events.k8s.io
+    resources:
+      - secrets
+      - events
+      - pods
+      - pods/log
+      - jobs
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - watch
+      - patch
 ---
 apiVersion: v1
 kind: ServiceAccount
@@ -65,3 +74,60 @@ subjects:
   - kind: ServiceAccount
     name: kubeslice-api-gw
     namespace: kubeslice-controller
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kubeslice-installer-job-role
+rules:
+  - apiGroups:
+      - controller.kubeslice.io
+    resources:
+      - clusters
+      - clusters/status
+    verbs:
+      - patch
+      - update
+      - get
+  - apiGroups:
+      - ""
+      - events.k8s.io
+    resources:
+      - events
+    verbs:
+      - create
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - serviceaccounts
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - secrets
+    verbs:
+      - get
+      - delete
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubeslice-installer-job-rb
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubeslice-installer-job-role
+subjects:
+  - kind: ServiceAccount
+    name: kubeslice-installer-job
+    namespace: kubeslice-controller
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kubeslice-installer-job
+---
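One way to sanity-check the new installer-job RBAC after install is `kubectl auth can-i` while impersonating the ServiceAccount; a quick sketch (results depend on the cluster, so treat the expectations as indicative):

```console
# Granted by kubeslice-installer-job-role
kubectl auth can-i patch clusters.controller.kubeslice.io \
  --as=system:serviceaccount:kubeslice-controller:kubeslice-installer-job   # expect: yes

# Not in the role (only secrets are deletable)
kubectl auth can-i delete configmaps \
  --as=system:serviceaccount:kubeslice-controller:kubeslice-installer-job   # expect: no
```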

View File

@@ -37,6 +37,10 @@ spec:
         env:
           - name: KUBESLICE_CONTROLLER_PROMETHEUS
             value: {{ .Values.kubeslice.prometheus.url }}
+          - name: KUBESLICE_WORKER_INSTALLER_IMAGE
+            value: '{{ .Values.kubeslice.workerinstaller.image }}:{{ .Values.kubeslice.workerinstaller.tag }}'
+          - name: KUBESLICE_WORKER_INSTALLER_IMAGE_PULL_POLICY
+            value: '{{ .Values.kubeslice.workerinstaller.pullPolicy}}'
         name: kubeslice-api-gw
         ports:
           - containerPort: 3000
@@ -56,3 +60,27 @@ spec:
       secret:
         secretName: kubeslice-ui-oidc
         optional: true
+---
+# create configmap called worker-chart-options
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: worker-chart-options
+data:
+  # set the chart options
+  workerChartOptions.yaml: |
+    workerChartOptions:
+      metricsInsecure: # [Optional] Default is false. Set to true if required to disable TLS for metrics server.
+      repository: # [Optional] Helm repository URL for worker charts. Default is `https://kubeslice.aveshalabs.io/repository/kubeslice-helm-ent-prod/`
+      releaseName: # [Optional] Release name of kubeslice-worker. Default is `kubeslice-worker`
+      chartName: # [Optional] Name of the chart. Default is `kubeslice-worker`
+      chartVersion: # [Optional] Version of the chart. Default is the latest version
+      debug: # [Optional] Default is false. Set to true if required to enable debug logs for kubeslice-worker
+      helmCredentials:
+        username: # [Optional] Required for private helm repo
+        password: # [Optional] Required for private helm repo
+      imagePullSecrets:
+        repository: # [Optional] Required for private docker repo
+        username: # [Optional] Required for private docker repo
+        password: # [Optional] Required for private docker repo
+        email: # [Optional] Required for private docker repo
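Since every option above defaults to empty, the rendered ConfigMap is mostly comments; to see what the API gateway will actually read, the key can be dumped directly (the `kubeslice-controller` namespace is the chart's usual install target, assumed here):

```console
kubectl -n kubeslice-controller get configmap worker-chart-options \
  -o jsonpath='{.data.workerChartOptions\.yaml}'
```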

View File

@@ -2694,6 +2694,19 @@ metadata:
   creationTimestamp: null
   name: kubeslice-controller-controller-role
 rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - create
+  - delete
+  - escalate
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - ""
   resources:
@@ -2744,6 +2757,15 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  verbs:
+  - escalate
+  - get
+  - list
+  - watch
 - apiGroups:
   - batch
   resources:

View File

@@ -0,0 +1,166 @@
+{{/*{{- define "controller.licensemode" -}}*/}}
+{{/*{{- $values := list "auto" "manual" "air-gap" -}}*/}}
+{{/*{{- if not (contains $values .) }}*/}}
+{{/*{{- fail (printf "Invalid value '%s' for license mode" .) -}}*/}}
+{{/*{{- end }}*/}}
+{{/*{{- . }}*/}}
+{{/*{{- end }}*/}}
+
+{{/*{{- define "controller.licensetype" -}}*/}}
+{{/*{{- $values := list "kubeslice-trial-license" -}}*/}}
+{{/*{{- if not (contains $values .) }}*/}}
+{{/*{{- fail (printf "Invalid value '%s' for license type" .) -}}*/}}
+{{/*{{- end }}*/}}
+{{/*{{- . }}*/}}
+{{/*{{- end }}*/}}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kubeslice-controller-license-config
+  namespace: kubeslice-controller
+  labels:
+    app.kubernetes.io/managed-by: kubeslice-controller
+data:
+  apiURL: LZtbEDBzFinn2HBQgc89vK8h2chsdurscRqbcvgzstvJ2zUR7cXL0d21Ik73br6vfE8aqZrROC41Zbf1Zj485W7OXHI=
+  apiKey: szl3olNL5Sn0GrS3jbuLxZjTMw7ja1tmRXiyQtZMyFJL8kgC3tTBNNWaLyK7utqN63bStzvpgXM=
+  publicKey: OSITIrMziTso5NF-JW7t1y1HSLs0t0CwQTEIR4SKgNOIIxbP-ZlKrkD7fDq-8XG4uw-R7KkmqLKaxUFGqAAL8KI6IBnFiO968PTTTXyrCqk=
+binaryData: {}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: kubeslice-controller-license-job-role
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - create
+  - delete
+  - escalate
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - escalate
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - create
+  - delete
+  - escalate
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - delete
+  - escalate
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - clusterroles
+  - rolebindings
+  - roles
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  verbs:
+  - escalate
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubeslice-controller-license-job-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubeslice-controller-license-job-role
+subjects:
+- kind: ServiceAccount
+  name: kubeslice-controller-license-job-manager
+  namespace: kubeslice-controller
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kubeslice-controller-license-job-manager
+  namespace: kubeslice-controller

View File

@@ -5,6 +5,98 @@
     "kubeslice": {
       "type": "object",
       "properties": {
+        "rbacproxy": {
+          "type": "object",
+          "properties": {
+            "image": {
+              "type": "string"
+            },
+            "tag": {
+              "type": "string"
+            }
+          }
+        },
+        "controller": {
+          "type": "object",
+          "properties": {
+            "logLevel": {
+              "type": "string",
+              "minLength": 1
+            },
+            "rbacResourcePrefix": {
+              "type": "string"
+            },
+            "projectnsPrefix": {
+              "type": "string"
+            },
+            "endpoint": {
+              "type": "string"
+            },
+            "image": {
+              "type": "string",
+              "minLength": 1
+            },
+            "tag": {
+              "type": "string",
+              "minLength": 1
+            },
+            "pullPolicy": {
+              "type": "string",
+              "pattern": "^(Always|Never|IfNotPresent)$",
+              "minLength": 1
+            }
+          },
+          "required": ["image","tag","pullPolicy","logLevel"]
+        },
+        "ovpnJob": {
+          "type": "object",
+          "properties": {
+            "image": {
+              "type": "string"
+            },
+            "tag": {
+              "type": "string"
+            }
+          }
+        },
+        "prometheus": {
+          "type": "object",
+          "properties": {
+            "enabled": {
+              "type": "boolean"
+            },
+            "url": {
+              "type": "string"
+            }
+          },
+          "if": {
+            "properties": {
+              "enabled": {
+                "const": true
+              }
+            }
+          },
+          "then": {
+            "required": ["url"]
+          }
+        },
+        "license": {
+          "type": "object",
+          "properties": {
+            "type": {
+              "type": "string",
+              "enum": ["kubeslice-trial-license"]
+            },
+            "mode": {
+              "type": "string",
+              "enum": ["auto", "manual","air-gap"]
+            },
+            "customerName": {
+              "type": "string",
+              "description": "Name of the customer"
+            }
+          }
+        },
         "ui": {
           "type": "object",
           "properties": {
@@ -59,10 +151,12 @@
             "pullPolicy": {"type": "string"}
           }
         },
-        "prometheus": {
+        "workerinstaller": {
           "type": "object",
           "properties": {
-            "url": {"type": "string"}
+            "image": {"type": "string"},
+            "tag": {"type": "string"},
+            "pullPolicy": {"type": "string"}
           }
         }
       }
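Helm validates `values.schema.json` on `lint`, `install`, `upgrade`, and `template`, so the new `enum` and `pattern` constraints reject bad values before anything is rendered; a quick sketch against the unpacked chart:

```console
# Both commands should fail schema validation
helm lint ./kubeslice-controller --set kubeslice.controller.pullPolicy=Sometimes   # violates the pullPolicy pattern
helm lint ./kubeslice-controller --set kubeslice.license.mode=evaluation           # not in the mode enum
```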

View File

@@ -9,7 +9,7 @@ kubeslice:
     projectnsPrefix: kubeslice
     endpoint:
     image: aveshasystems/kubeslice-controller-ent
-    tag: 0.10.0
+    tag: 1.0.0
     pullPolicy: IfNotPresent
   ovpnJob:
     image: aveshasystems/gateway-certs-generator
@@ -19,15 +19,23 @@ kubeslice:
     url: http://kubeslice-controller-prometheus-service:9090
   events:
     disabled: false
+  # license details by default mode set to auto and license set to trial - please give company-name or user-name as customerName
+  license:
+    # possible license type values ["kubeslice-trial-license"]
+    type: kubeslice-trial-license
+    # possible license mode - ["auto", "manual"]
+    mode: auto
+    # please give company-name or user-name as customerName
+    customerName: ""
   # Kubeslice UI settings
   ui:
     image: aveshasystems/kubeslice-ui-ent
-    tag: 0.10.0
+    tag: 1.0.0
     pullPolicy: IfNotPresent
   uiv2:
     image: aveshasystems/kubeslice-ui-v2-ent
-    tag: 0.2.0
+    tag: 1.0.1
     pullPolicy: IfNotPresent
   dashboard:
     image: aveshasystems/kubeslice-kubernetes-dashboard
@@ -35,7 +43,7 @@ kubeslice:
     pullPolicy: IfNotPresent
   uiproxy:
     image: aveshasystems/kubeslice-ui-proxy
-    tag: 1.1.0
+    tag: 1.2.0
     pullPolicy: IfNotPresent
     service:
       ## For kind, set this to NodePort, elsewhere use LoadBalancer or NodePort
@@ -46,8 +54,13 @@ kubeslice:
       # nodePort:
   apigw:
     image: aveshasystems/kubeslice-api-gw-ent
-    tag: 1.7.1
+    tag: 1.8.2
     pullPolicy: IfNotPresent
+  workerinstaller:
+    image: aveshasystems/worker-installer
+    tag: 1.1.9
+    pullPolicy: Always
   # username & password & email values for imagePullSecrets have to be provided to create a secret
   imagePullSecrets:

View File

@@ -5,7 +5,7 @@ annotations:
   catalog.cattle.io/namespace: kubeslice-system
   catalog.cattle.io/release-name: kubeslice-worker
 apiVersion: v2
-appVersion: 0.10.0
+appVersion: 1.0.0
 description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking
   tool for efficient, secure, policy-enforced connectivity and true multi-tenancy
   capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure
@@ -34,6 +34,9 @@ keywords:
 - infrastructure
 - application
 kubeVersion: '>= 1.19.0-0'
+maintainers:
+- email: support@avesha.io
+  name: Avesha
 name: kubeslice-worker
 type: application
-version: 0.10.0
+version: 1.0.0

View File

@@ -2,7 +2,7 @@
 ## Prerequisites
 - KubeSlice Controller needs to be installed
-- Create and configure the worker cluster following instructions in prerequisites and "registering the worker cluster" sections [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher)
+- Create and configure the worker cluster following instructions in prerequisites and "registering the worker cluster" sections [documentation](https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher)
 - Copy the chart version from the upper right hand section of this page [VERSION parameter needed during install and upgrade]
 - Click on the download link from the upper right hand section of this page, save it to location available from command prompt <LOCATION OF DOWNLOADED CHART.tgz>
 - Untar the chart to get the values.yaml file and edit the following fields
@@ -34,7 +34,7 @@ helm upgrade --history-max=5 --namespace=kubeslice-system kubeslice-worker kubes
 ```
 ### Uninstall Kubeslice Worker
-- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/0.10.0/getting-started-with-cloud-clusters/uninstalling-kubeslice/deregistering-the-worker-cluster)
+- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/1.0.0/getting-started-with-cloud-clusters/uninstalling-kubeslice/deregistering-the-worker-cluster)
 ```console
 export KUBECONFIG=<WORKER CLUSTER KUBECONFIG>

View File

@@ -17,7 +17,7 @@ questions:
     variable: imagePullSecrets.password
   -
     default: ""
-    description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
+    description: "https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
     group: "Worker Secrets from Controller"
     label: "Controller Namespace"
     required: true
@@ -25,7 +25,7 @@ questions:
     variable: controllerSecret.namespace
   -
     default: ""
-    description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
+    description: "https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
     group: "Worker Secrets from Controller"
     label: "Controller Endpoint"
     required: true
@@ -33,7 +33,7 @@ questions:
     variable: controllerSecret.endpoint
   -
     default: ""
-    description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
+    description: "https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
     group: "Worker Secrets from Controller"
     label: "Controller CA Cert"
     required: true
@@ -41,7 +41,7 @@ questions:
     variable: controllerSecret.'ca.crt'
   -
     default: ""
-    description: "https://docs.avesha.io/documentation/enterprise/0.10.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
+    description: "https://docs.avesha.io/documentation/enterprise/1.0.0/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster"
     group: "Worker Secrets from Controller"
     label: "Controller Token"
     required: true
@@ -57,7 +57,7 @@ questions:
     variable: cluster.name
   -
     default: ""
-    description: "Worker Cluster Endpoint,use 'kubectl cluster-info on worker cluster' or for details please follow https://docs.avesha.io/documentation/enterprise/0.10.0/"
+    description: "Worker Cluster Endpoint, use 'kubectl cluster-info on worker cluster' or for details please follow https://docs.avesha.io/documentation/enterprise/1.0.0/"
     group: "Worker Cluster Details"
     label: "Cluster Endpoint"
     required: true

View File

@@ -3,6 +3,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: kubeslice-kubernetes-dashboard
+  annotations:
+    helm.sh/resource-policy: keep
 rules:
   - verbs:
       - get
@@ -18,6 +20,8 @@ kind: ServiceAccount
 metadata:
   name: kubeslice-kubernetes-dashboard
   namespace: kubeslice-system
+  annotations:
+    helm.sh/resource-policy: keep
 secrets:
   - name: kubeslice-kubernetes-dashboard-creds
 ---
@@ -25,6 +29,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: kubeslice-kubernetes-dashboard
+  annotations:
+    helm.sh/resource-policy: keep
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -40,4 +46,5 @@ type: kubernetes.io/service-account-token
 metadata:
   name: kubeslice-kubernetes-dashboard-creds
   annotations:
     kubernetes.io/service-account.name: "kubeslice-kubernetes-dashboard"
+    helm.sh/resource-policy: keep
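`helm.sh/resource-policy: keep` makes Helm skip these dashboard RBAC objects on uninstall, so they survive release deletion and must be removed by hand when no longer wanted; a sketch (release and namespace names assumed):

```console
helm uninstall kubeslice-worker -n kubeslice-system
kubectl get clusterrole kubeslice-kubernetes-dashboard        # still present after uninstall
kubectl delete clusterrole,clusterrolebinding kubeslice-kubernetes-dashboard   # explicit cleanup
```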

View File

@@ -73,6 +73,38 @@ metadata:
   creationTimestamp: null
   name: kubeslice-manager-role
 rules:
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  - rolebindings
+  - clusterroles
+  - clusterrolebindings
+  verbs:
+  - get
+  - list
+  - patch
+  - update
+  - create
+  - delete
+- apiGroups:
+  - batch
+  - admissionregistration.k8s.io
+  - apiextensions.k8s.io
+  - scheduling.k8s.io
+  resources: ["*"]
+  verbs:
+  - get
+  - list
+  - delete
+  - create
+  - watch
+- apiGroups: ["spiffeid.spiffe.io"]
+  resources: ["spiffeids"]
+  verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+- apiGroups: ["spiffeid.spiffe.io"]
+  resources: ["spiffeids/status"]
+  verbs: ["get", "patch", "update"]
 - apiGroups:
   - networking.kubeslice.io
   resources:

View File

@@ -1,6 +1,6 @@
 operator:
   image: aveshasystems/worker-operator-ent
-  tag: 0.10.0
+  tag: 1.0.0
   pullPolicy: IfNotPresent
   logLevel: INFO
@@ -23,7 +23,7 @@ router:
   routerSidecar:
     image: docker.io/aveshasystems/kubeslice-router-sidecar
-    tag: 1.4.1
+    tag: 1.4.2
     pullPolicy: IfNotPresent
 netop:

View File

@@ -29,4 +29,4 @@ maintainers:
 name: mysql
 sources:
 - https://github.com/bitnami/charts/tree/main/bitnami/mysql
-version: 9.10.1
+version: 9.10.2

View File

@@ -81,7 +81,7 @@ The command removes all the Kubernetes components associated with the chart and
 | -------------------------- | -------------------------------------------------------------------------------------------------------------------- | ---------------------- |
 | `image.registry` | MySQL image registry | `docker.io` |
 | `image.repository` | MySQL image repository | `bitnami/mysql` |
-| `image.tag` | MySQL image tag (immutable tags are recommended) | `8.0.33-debian-11-r12` |
+| `image.tag` | MySQL image tag (immutable tags are recommended) | `8.0.33-debian-11-r17` |
 | `image.digest` | MySQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
 | `image.pullPolicy` | MySQL image pull policy | `IfNotPresent` |
 | `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@@ -305,7 +305,7 @@ The command removes all the Kubernetes components associated with the chart and
 | `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
 | `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
 | `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
-| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r118` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r123` |
 | `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
 | `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
 | `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@@ -318,7 +318,7 @@ The command removes all the Kubernetes components associated with the chart and
 | `metrics.enabled` | Start a side-car prometheus exporter | `false` |
 | `metrics.image.registry` | Exporter image registry | `docker.io` |
 | `metrics.image.repository` | Exporter image repository | `bitnami/mysqld-exporter` |
-| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.14.0-debian-11-r119` |
+| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.14.0-debian-11-r125` |
 | `metrics.image.digest` | Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
 | `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` |
 | `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
@@ -535,7 +535,7 @@ kubectl delete statefulset mysql-slave --cascade=false
 ## License
-Copyright &copy; 2023 Bitnami
+Copyright &copy; 2023 VMware, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -82,7 +82,7 @@ diagnosticMode:
 image:
   registry: docker.io
   repository: bitnami/mysql
-  tag: 8.0.33-debian-11-r12
+  tag: 8.0.33-debian-11-r17
   digest: ""
   ## Specify a imagePullPolicy
   ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@@ -1008,7 +1008,7 @@ volumePermissions:
   image:
     registry: docker.io
     repository: bitnami/bitnami-shell
-    tag: 11-debian-11-r118
+    tag: 11-debian-11-r123
     digest: ""
     pullPolicy: IfNotPresent
     ## Optionally specify an array of imagePullSecrets.
@@ -1042,7 +1042,7 @@ metrics:
   image:
     registry: docker.io
     repository: bitnami/mysqld-exporter
-    tag: 0.14.0-debian-11-r119
+    tag: 0.14.0-debian-11-r125
     digest: ""
     pullPolicy: IfNotPresent
     ## Optionally specify an array of imagePullSecrets.

View File

@ -1,5 +1,9 @@
# Datadog changelog # Datadog changelog
## 3.31.0
* Default `Agent` and `Cluster-Agent` to `7.45.0` version.
## 3.30.10 ## 3.30.10
* Updated pointerdir mountPath for Windows deployments. * Updated pointerdir mountPath for Windows deployments.

View File

@@ -19,4 +19,4 @@ name: datadog
 sources:
 - https://app.datadoghq.com/account/settings#agent/kubernetes
 - https://github.com/DataDog/datadog-agent
-version: 3.30.10
+version: 3.31.0

View File

@ -1,6 +1,6 @@
# Datadog # Datadog
![Version: 3.30.10](https://img.shields.io/badge/Version-3.30.10-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) ![Version: 3.31.0](https://img.shields.io/badge/Version-3.31.0-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square)
[Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). [Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/).
@ -449,7 +449,7 @@ helm install <RELEASE_NAME> \
| agents.image.pullPolicy | string | `"IfNotPresent"` | Datadog Agent image pull policy | | agents.image.pullPolicy | string | `"IfNotPresent"` | Datadog Agent image pull policy |
| agents.image.pullSecrets | list | `[]` | Datadog Agent repository pullSecret (ex: specify docker registry credentials) | | agents.image.pullSecrets | list | `[]` | Datadog Agent repository pullSecret (ex: specify docker registry credentials) |
| agents.image.repository | string | `nil` | Override default registry + image.name for Agent | | agents.image.repository | string | `nil` | Override default registry + image.name for Agent |
| agents.image.tag | string | `"7.44.1"` | Define the Agent version to use | | agents.image.tag | string | `"7.45.0"` | Define the Agent version to use |
| agents.image.tagSuffix | string | `""` | Suffix to append to Agent tag | | agents.image.tagSuffix | string | `""` | Suffix to append to Agent tag |
| agents.localService.forceLocalServiceEnabled | bool | `false` | Force the creation of the internal traffic policy service to target the agent running on the local node. By default, the internal traffic service is created only on Kubernetes 1.22+ where the feature became beta and enabled by default. This option allows to force the creation of the internal traffic service on kubernetes 1.21 where the feature was alpha and required a feature gate to be explicitly enabled. | | agents.localService.forceLocalServiceEnabled | bool | `false` | Force the creation of the internal traffic policy service to target the agent running on the local node. By default, the internal traffic service is created only on Kubernetes 1.22+ where the feature became beta and enabled by default. This option allows to force the creation of the internal traffic service on kubernetes 1.21 where the feature was alpha and required a feature gate to be explicitly enabled. |
| agents.localService.overrideName | string | `""` | Name of the internal traffic service to target the agent running on the local node | | agents.localService.overrideName | string | `""` | Name of the internal traffic service to target the agent running on the local node |
@ -511,7 +511,7 @@ helm install <RELEASE_NAME> \
| clusterAgent.image.pullPolicy | string | `"IfNotPresent"` | Cluster Agent image pullPolicy | | clusterAgent.image.pullPolicy | string | `"IfNotPresent"` | Cluster Agent image pullPolicy |
| clusterAgent.image.pullSecrets | list | `[]` | Cluster Agent repository pullSecret (ex: specify docker registry credentials) | | clusterAgent.image.pullSecrets | list | `[]` | Cluster Agent repository pullSecret (ex: specify docker registry credentials) |
| clusterAgent.image.repository | string | `nil` | Override default registry + image.name for Cluster Agent | | clusterAgent.image.repository | string | `nil` | Override default registry + image.name for Cluster Agent |
| clusterAgent.image.tag | string | `"7.44.1"` | Cluster Agent image tag to use | | clusterAgent.image.tag | string | `"7.45.0"` | Cluster Agent image tag to use |
| clusterAgent.livenessProbe | object | Every 15s / 6 KO / 1 OK | Override default Cluster Agent liveness probe settings | | clusterAgent.livenessProbe | object | Every 15s / 6 KO / 1 OK | Override default Cluster Agent liveness probe settings |
| clusterAgent.metricsProvider.aggregator | string | `"avg"` | Define the aggregator the cluster agent will use to process the metrics. The options are (avg, min, max, sum) | | clusterAgent.metricsProvider.aggregator | string | `"avg"` | Define the aggregator the cluster agent will use to process the metrics. The options are (avg, min, max, sum) |
| clusterAgent.metricsProvider.createReaderRbac | bool | `true` | Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) | | clusterAgent.metricsProvider.createReaderRbac | bool | `true` | Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) |
@ -561,7 +561,7 @@ helm install <RELEASE_NAME> \
| clusterChecksRunner.image.pullPolicy | string | `"IfNotPresent"` | Datadog Agent image pull policy | | clusterChecksRunner.image.pullPolicy | string | `"IfNotPresent"` | Datadog Agent image pull policy |
| clusterChecksRunner.image.pullSecrets | list | `[]` | Datadog Agent repository pullSecret (ex: specify docker registry credentials) | | clusterChecksRunner.image.pullSecrets | list | `[]` | Datadog Agent repository pullSecret (ex: specify docker registry credentials) |
| clusterChecksRunner.image.repository | string | `nil` | Override default registry + image.name for Cluster Check Runners | | clusterChecksRunner.image.repository | string | `nil` | Override default registry + image.name for Cluster Check Runners |
| clusterChecksRunner.image.tag | string | `"7.44.1"` | Define the Agent version to use | | clusterChecksRunner.image.tag | string | `"7.45.0"` | Define the Agent version to use |
| clusterChecksRunner.image.tagSuffix | string | `""` | Suffix to append to Agent tag | | clusterChecksRunner.image.tagSuffix | string | `""` | Suffix to append to Agent tag |
| clusterChecksRunner.livenessProbe | object | Every 15s / 6 KO / 1 OK | Override default agent liveness probe settings | | clusterChecksRunner.livenessProbe | object | Every 15s / 6 KO / 1 OK | Override default agent liveness probe settings |
| clusterChecksRunner.networkPolicy.create | bool | `false` | If true, create a NetworkPolicy for the cluster checks runners. DEPRECATED. Use datadog.networkPolicy.create instead | | clusterChecksRunner.networkPolicy.create | bool | `false` | If true, create a NetworkPolicy for the cluster checks runners. DEPRECATED. Use datadog.networkPolicy.create instead |

View File

@@ -815,7 +815,7 @@ clusterAgent:
     name: cluster-agent
     # clusterAgent.image.tag -- Cluster Agent image tag to use
-    tag: 7.44.1
+    tag: 7.45.0
     # clusterAgent.image.digest -- Cluster Agent image digest to use, takes precedence over tag if specified
     digest: ""
@@ -1209,7 +1209,7 @@ agents:
     name: agent
     # agents.image.tag -- Define the Agent version to use
-    tag: 7.44.1
+    tag: 7.45.0
     # agents.image.digest -- Define Agent image digest to use, takes precedence over tag if specified
     digest: ""
@@ -1675,7 +1675,7 @@ clusterChecksRunner:
     name: agent
     # clusterChecksRunner.image.tag -- Define the Agent version to use
-    tag: 7.44.1
+    tag: 7.45.0
     # clusterChecksRunner.image.digest -- Define Agent image digest to use, takes precedence over tag if specified
     digest: ""
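All three images move to `7.45.0` in lockstep. If you pin the agent version explicitly rather than taking chart defaults, the matching override looks like this (release name and repo alias are assumptions):

```console
helm upgrade datadog datadog/datadog --reuse-values \
  --set agents.image.tag=7.45.0 \
  --set clusterAgent.image.tag=7.45.0 \
  --set clusterChecksRunner.image.tag=7.45.0
```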

View File

@@ -4,9 +4,9 @@ annotations:
   catalog.cattle.io/kube-version: '>= 1.19.0-0'
   catalog.cattle.io/release-name: vals-operator
 apiVersion: v2
-appVersion: 0.7.2
-description: This helm chart installs the Digitalis Vals Operator to manage sync secrets
-  from supported backends into Kubernetes
+appVersion: v0.7.3
+description: This helm chart installs the Digitalis Vals Operator to manage and sync
+  secrets from supported backends into Kubernetes.
 icon: https://digitalis.io/wp-content/uploads/2020/06/cropped-Digitalis-512x512-Blue_Digitalis-512x512-Blue-32x32.png
 kubeVersion: '>= 1.19.0-0'
 maintainers:
@@ -14,4 +14,4 @@ maintainers:
   name: Digitalis.IO
 name: vals-operator
 type: application
-version: 0.7.2
+version: 0.7.3

View File

@ -1,6 +1,22 @@
vals-operator vals-operator
============= =============
This helm chart installs the Digitalis Vals Operator to manage sync secrets from supported backends into Kubernetes
This helm chart installs the Digitalis Vals Operator to manage and sync secrets from supported backends into Kubernetes.
## About Vals-Operator
Here at [Digitalis](https://digitalis.io) we love [vals](https://github.com/helmfile/vals), it's a tool we use daily to keep secrets stored securely. Inspired by this tool,
we have created an operator to manage Kubernetes secrets.
*vals-operator* syncs secrets from any secrets store supported by [vals](https://github.com/helmfile/vals) into Kubernetes. Also, `vals-operator` supports database secrets
as provider by [HashiCorp Vault Secret Engine](https://developer.hashicorp.com/vault/docs/secrets/databases).
## Demo
You can watch this brief video on how it works:
[![YouTube](../../youtube-video.png)](https://www.youtube.com/watch?feature=player_embedded&v=wLzkrKdSBT8)
## Chart Values ## Chart Values
@ -9,13 +25,16 @@ This helm chart installs the Digitalis Vals Operator to manage sync secrets from
|-----|------|---------|-------------| |-----|------|---------|-------------|
| affinity | object | `{}` | | | affinity | object | `{}` | |
| args | list | `[]` | | | args | list | `[]` | |
| enableDbSecrets | bool | `true` | |
| env | list | `[]` | | | env | list | `[]` | |
| environmentSecret | string | `""` | |
| fullnameOverride | string | `""` | | | fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | | | image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"digitalisdocker/vals-operator"` | | | image.repository | string | `"ghcr.io/digitalis-io/vals-operator"` | |
| image.tag | string | `""` | | | image.tag | string | `""` | |
| imagePullSecrets | list | `[]` | | | imagePullSecrets | list | `[]` | |
| manageCrds | bool | `true` | | | manageCrds | bool | `true` | |
| metricsPort | int | `8080` | |
| nameOverride | string | `""` | | | nameOverride | string | `""` | |
| nodeSelector | object | `{}` | | | nodeSelector | object | `{}` | |
| podSecurityContext | object | `{}` | | | podSecurityContext | object | `{}` | |

View File

@@ -4,7 +4,7 @@ annotations:
   catalog.cattle.io/kube-version: '>=1.20-0'
   catalog.cattle.io/release-name: harbor
 apiVersion: v1
-appVersion: 2.8.1
+appVersion: 2.8.2
 description: An open source trusted cloud native registry that stores, signs, and
   scans content
 home: https://goharbor.io
@@ -24,4 +24,4 @@ name: harbor
 sources:
 - https://github.com/goharbor/harbor
 - https://github.com/goharbor/harbor-helm
-version: 1.12.1
+version: 1.12.2

View File

@@ -48,7 +48,7 @@ data:
   HTTPS_PROXY: "{{ .Values.proxy.httpsProxy }}"
   NO_PROXY: "{{ template "harbor.noProxy" . }}"
   {{- end }}
-  PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,jfrog-artifactory"
+  PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory"
   {{- if .Values.metrics.enabled}}
   METRIC_ENABLE: "true"
   METRIC_PATH: "{{ .Values.metrics.core.path }}"
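The added `github-ghcr` entry allows Harbor proxy-cache projects to point at GitHub Container Registry. After upgrading, the effective list can be read back from the rendered core ConfigMap (the `harbor-core` name and `harbor` namespace assume a release named `harbor`):

```console
kubectl -n harbor get configmap harbor-core \
  -o jsonpath='{.data.PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE}'
```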

View File

@@ -400,7 +400,7 @@ enableMigrateHelmHook: false
 nginx:
   image:
     repository: goharbor/nginx-photon
-    tag: v2.8.1
+    tag: v2.8.2
   # set the service account to be used, default if left empty
   serviceAccountName: ""
   # mount the service account token
@@ -422,7 +422,7 @@ nginx:
 portal:
   image:
     repository: goharbor/harbor-portal
-    tag: v2.8.1
+    tag: v2.8.2
   # set the service account to be used, default if left empty
   serviceAccountName: ""
   # mount the service account token
@@ -444,7 +444,7 @@ portal:
 core:
   image:
     repository: goharbor/harbor-core
-    tag: v2.8.1
+    tag: v2.8.2
   # set the service account to be used, default if left empty
   serviceAccountName: ""
   # mount the service account token
@@ -497,7 +497,7 @@ core:
 jobservice:
   image:
     repository: goharbor/harbor-jobservice
-    tag: v2.8.1
+    tag: v2.8.2
   replicas: 1
   revisionHistoryLimit: 10
   # set the service account to be used, default if left empty
@@ -545,7 +545,7 @@ registry:
   registry:
     image:
       repository: goharbor/registry-photon
-      tag: v2.8.1
+      tag: v2.8.2
     # resources:
     #  requests:
     #    memory: 256Mi
@@ -553,7 +553,7 @@ registry:
   controller:
     image:
       repository: goharbor/harbor-registryctl
-      tag: v2.8.1
+      tag: v2.8.2
     # resources:
     #  requests:
@@ -610,7 +610,7 @@ trivy:
     # repository the repository for Trivy adapter image
     repository: goharbor/trivy-adapter-photon
     # tag the tag for Trivy adapter image
-    tag: v2.8.1
+    tag: v2.8.2
   # set the service account to be used, default if left empty
   serviceAccountName: ""
   # mount the service account token
@@ -685,7 +685,7 @@ notary:
   automountServiceAccountToken: false
   image:
     repository: goharbor/notary-server-photon
-    tag: v2.8.1
+    tag: v2.8.2
   replicas: 1
   # resources:
   #   requests:
@@ -707,7 +707,7 @@ notary:
   automountServiceAccountToken: false
   image:
     repository: goharbor/notary-signer-photon
-    tag: v2.8.1
+    tag: v2.8.2
   replicas: 1
   # resources:
   #   requests:
@@ -739,7 +739,7 @@ database:
   automountServiceAccountToken: false
   image:
     repository: goharbor/harbor-db
-    tag: v2.8.1
+    tag: v2.8.2
   # The initial superuser password for internal database
   password: "changeit"
   # The size limit for Shared memory, pgSQL use it for shared_buffer
@@ -811,7 +811,7 @@ redis:
   automountServiceAccountToken: false
   image:
     repository: goharbor/redis-photon
-    tag: v2.8.1
+    tag: v2.8.2
   # resources:
   #   requests:
   #     memory: 256Mi
@@ -855,7 +855,7 @@ exporter:
   automountServiceAccountToken: false
   image:
     repository: goharbor/harbor-exporter
-    tag: v2.8.1
+    tag: v2.8.2
   nodeSelector: {}
   tolerations: []
   affinity: {}


@@ -1,10 +1,787 @@
# Change Log
## 23.1.0 ![AppVersion: v2.10.1](https://img.shields.io/static/v1?label=AppVersion&message=v2.10.1&color=success&logo=) ![Kubernetes: >=1.16.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.16.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm)
**Release date:** 2023-06-06
* release: 🚀 publish v23.1.0
* feat: ✨ add a warning when labelSelector don't match
* feat: add optional `appProtocol` field on Service ports
* fix: use `targetPort` instead of `port` on ServiceMonitor
* fix: 🐛 use k8s version for hpa api version
* fix: 🐛 http3 support on traefik v3
* feat: remove Traefik Hub v1 integration
* doc: added values README via helm-docs cli
* feat: allow specifying service loadBalancerClass
* feat: common labels for all resources
### Default value changes
```diff
diff --git a/traefik/values.yaml b/traefik/values.yaml
index 71273cc..345bbd8 100644
--- a/traefik/values.yaml
+++ b/traefik/values.yaml
@@ -1,70 +1,56 @@
# Default values for Traefik
image:
+ # -- Traefik image host registry
registry: docker.io
+ # -- Traefik image repository
repository: traefik
- # defaults to appVersion
+ # -- defaults to appVersion
tag: ""
+ # -- Traefik image pull policy
pullPolicy: IfNotPresent
-#
-# Configure integration with Traefik Hub
-#
-hub:
- ## Enabling Hub will:
- # * enable Traefik Hub integration on Traefik
- # * add `traefikhub-tunl` endpoint
- # * enable Prometheus metrics with addRoutersLabels
- # * enable allowExternalNameServices on KubernetesIngress provider
- # * enable allowCrossNamespace on KubernetesCRD provider
- # * add an internal (ClusterIP) Service, dedicated for Traefik Hub
- enabled: false
- ## Default port can be changed
- # tunnelPort: 9901
- ## TLS is optional. Insecure is mutually exclusive with any other options
- # tls:
- # insecure: false
- # ca: "/path/to/ca.pem"
- # cert: "/path/to/cert.pem"
- # key: "/path/to/key.pem"
+# -- Add additional label to all resources
+commonLabels: {}
#
# Configure the deployment
#
deployment:
+ # -- Enable deployment
enabled: true
- # Can be either Deployment or DaemonSet
+ # -- Deployment or DaemonSet
kind: Deployment
- # Number of pods of the deployment (only applies when kind == Deployment)
+ # -- Number of pods of the deployment (only applies when kind == Deployment)
replicas: 1
- # Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)
+ # -- Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)
# revisionHistoryLimit: 1
- # Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
+ # -- Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
terminationGracePeriodSeconds: 60
- # The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
+ # -- The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
minReadySeconds: 0
- # Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
+ # -- Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
annotations: {}
- # Additional deployment labels (e.g. for filtering deployment by custom labels)
+ # -- Additional deployment labels (e.g. for filtering deployment by custom labels)
labels: {}
- # Additional pod annotations (e.g. for mesh injection or prometheus scraping)
+ # -- Additional pod annotations (e.g. for mesh injection or prometheus scraping)
podAnnotations: {}
- # Additional Pod labels (e.g. for filtering Pod by custom labels)
+ # -- Additional Pod labels (e.g. for filtering Pod by custom labels)
podLabels: {}
- # Additional containers (e.g. for metric offloading sidecars)
+ # -- Additional containers (e.g. for metric offloading sidecars)
additionalContainers: []
# https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host
# - name: socat-proxy
- # image: alpine/socat:1.0.5
- # args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
- # volumeMounts:
- # - name: dsdsocket
- # mountPath: /socket
- # Additional volumes available for use with initContainers and additionalContainers
+ # image: alpine/socat:1.0.5
+ # args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
+ # volumeMounts:
+ # - name: dsdsocket
+ # mountPath: /socket
+ # -- Additional volumes available for use with initContainers and additionalContainers
additionalVolumes: []
# - name: dsdsocket
# hostPath:
# path: /var/run/statsd-exporter
- # Additional initContainers (e.g. for setting file permission as shown below)
+ # -- Additional initContainers (e.g. for setting file permission as shown below)
initContainers: []
# The "volume-permissions" init container is required if you run into permission issues.
# Related issue: https://github.com/traefik/traefik-helm-chart/issues/396
@@ -78,9 +64,9 @@ deployment:
# volumeMounts:
# - name: data
# mountPath: /data
- # Use process namespace sharing
+ # -- Use process namespace sharing
shareProcessNamespace: false
- # Custom pod DNS policy. Apply if `hostNetwork: true`
+ # -- Custom pod DNS policy. Apply if `hostNetwork: true`
# dnsPolicy: ClusterFirstWithHostNet
dnsConfig: {}
# nameservers:
@@ -92,10 +78,10 @@ deployment:
# - name: ndots
# value: "2"
# - name: edns0
- # Additional imagePullSecrets
+ # -- Additional imagePullSecrets
imagePullSecrets: []
# - name: myRegistryKeySecretName
- # Pod lifecycle actions
+ # -- Pod lifecycle actions
lifecycle: {}
# preStop:
# exec:
@@ -107,7 +93,7 @@ deployment:
# host: localhost
# scheme: HTTP
-# Pod disruption budget
+# -- Pod disruption budget
podDisruptionBudget:
enabled: false
# maxUnavailable: 1
@@ -115,93 +101,112 @@ podDisruptionBudget:
# minAvailable: 0
# minAvailable: 25%
-# Create a default IngressClass for Traefik
+# -- Create a default IngressClass for Traefik
ingressClass:
enabled: true
isDefaultClass: true
-# Enable experimental features
+# Traefik experimental features
experimental:
v3:
+ # -- Enable traefik version 3
enabled: false
plugins:
+ # -- Enable traefik experimental plugins
enabled: false
kubernetesGateway:
+ # -- Enable traefik experimental GatewayClass CRD
enabled: false
gateway:
+ # -- Enable traefik regular kubernetes gateway
enabled: true
# certificate:
# group: "core"
# kind: "Secret"
# name: "mysecret"
- # By default, Gateway would be created to the Namespace you are deploying Traefik to.
+ # -- By default, Gateway would be created to the Namespace you are deploying Traefik to.
# You may create that Gateway in another namespace, setting its name below:
# namespace: default
# Additional gateway annotations (e.g. for cert-manager.io/issuer)
# annotations:
# cert-manager.io/issuer: letsencrypt
-# Create an IngressRoute for the dashboard
+## Create an IngressRoute for the dashboard
ingressRoute:
dashboard:
+ # -- Create an IngressRoute for the dashboard
enabled: true
- # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
+ # -- Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
annotations: {}
- # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
+ # -- Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
labels: {}
- # The router match rule used for the dashboard ingressRoute
+ # -- The router match rule used for the dashboard ingressRoute
matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`)
- # Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure).
+ # -- Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure).
# By default, it's using traefik entrypoint, which is not exposed.
# /!\ Do not expose your dashboard without any protection over the internet /!\
entryPoints: ["traefik"]
- # Additional ingressRoute middlewares (e.g. for authentication)
+ # -- Additional ingressRoute middlewares (e.g. for authentication)
middlewares: []
- # TLS options (e.g. secret containing certificate)
+ # -- TLS options (e.g. secret containing certificate)
tls: {}
-# Customize updateStrategy of traefik pods
updateStrategy:
+ # -- Customize updateStrategy: RollingUpdate or OnDelete
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
maxSurge: 1
-# Customize liveness and readiness probe values.
readinessProbe:
+ # -- The number of consecutive failures allowed before considering the probe as failed.
failureThreshold: 1
+ # -- The number of seconds to wait before starting the first probe.
initialDelaySeconds: 2
+ # -- The number of seconds to wait between consecutive probes.
periodSeconds: 10
+ # -- The minimum consecutive successes required to consider the probe successful.
successThreshold: 1
+ # -- The number of seconds to wait for a probe response before considering it as failed.
timeoutSeconds: 2
-
livenessProbe:
+ # -- The number of consecutive failures allowed before considering the probe as failed.
failureThreshold: 3
+ # -- The number of seconds to wait before starting the first probe.
initialDelaySeconds: 2
+ # -- The number of seconds to wait between consecutive probes.
periodSeconds: 10
+ # -- The minimum consecutive successes required to consider the probe successful.
successThreshold: 1
+ # -- The number of seconds to wait for a probe response before considering it as failed.
timeoutSeconds: 2
-#
-# Configure providers
-#
providers:
kubernetesCRD:
+ # -- Load Kubernetes IngressRoute provider
enabled: true
+ # -- Allows IngressRoute to reference resources in namespace other than theirs
allowCrossNamespace: false
+ # -- Allows to reference ExternalName services in IngressRoute
allowExternalNameServices: false
+ # -- Allows to return 503 when there is no endpoints available
allowEmptyServices: false
# ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik
+ # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces.
namespaces: []
# - "default"
kubernetesIngress:
+ # -- Load Kubernetes IngressRoute provider
enabled: true
+ # -- Allows to reference ExternalName services in Ingress
allowExternalNameServices: false
+ # -- Allows to return 503 when there is no endpoints available
allowEmptyServices: false
# ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik
+ # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces.
namespaces: []
# - "default"
# IP used for Kubernetes Ingress endpoints
@@ -212,13 +217,13 @@ providers:
# pathOverride: ""
#
-# Add volumes to the traefik pod. The volume name will be passed to tpl.
+# -- Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
-# additionalArguments:
+# `additionalArguments:
# - "--providers.file.filename=/config/dynamic.toml"
# - "--ping"
-# - "--ping.entrypoint=web"
+# - "--ping.entrypoint=web"`
volumes: []
# - name: public-cert
# mountPath: "/certs"
@@ -227,25 +232,22 @@ volumes: []
# mountPath: "/config"
# type: configMap
-# Additional volumeMounts to add to the Traefik container
+# -- Additional volumeMounts to add to the Traefik container
additionalVolumeMounts: []
- # For instance when using a logshipper for access logs
+ # -- For instance when using a logshipper for access logs
# - name: traefik-logs
# mountPath: /var/log/traefik
-## Logs
-## https://docs.traefik.io/observability/logs/
logs:
- ## Traefik logs concern everything that happens to Traefik itself (startup, configuration, events, shutdown, and so on).
general:
- # By default, the logs use a text format (common), but you can
+ # -- By default, the logs use a text format (common), but you can
# also ask for the json format in the format option
# format: json
# By default, the level is set to ERROR.
- # Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO.
+ # -- Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO.
level: ERROR
access:
- # To enable access logs
+ # -- To enable access logs
enabled: false
## By default, logs are written using the Common Log Format (CLF) on stdout.
## To write logs in JSON, use json in the format option.
@@ -256,21 +258,24 @@ logs:
## This option represents the number of log lines Traefik will keep in memory before writing
## them to the selected output. In some cases, this option can greatly help performances.
# bufferingSize: 100
- ## Filtering https://docs.traefik.io/observability/access-logs/#filtering
+ ## Filtering
+ # -- https://docs.traefik.io/observability/access-logs/#filtering
filters: {}
# statuscodes: "200,300-302"
# retryattempts: true
# minduration: 10ms
- ## Fields
- ## https://docs.traefik.io/observability/access-logs/#limiting-the-fieldsincluding-headers
fields:
general:
+ # -- Available modes: keep, drop, redact.
defaultmode: keep
+ # -- Names of the fields to limit.
names: {}
## Examples:
# ClientUsername: drop
headers:
+ # -- Available modes: keep, drop, redact.
defaultmode: drop
+ # -- Names of the headers to limit.
names: {}
## Examples:
# User-Agent: redact
@@ -278,10 +283,10 @@ logs:
# Content-Type: keep
metrics:
- ## Prometheus is enabled by default.
- ## It can be disabled by setting "prometheus: null"
+ ## -- Prometheus is enabled by default.
+ ## -- It can be disabled by setting "prometheus: null"
prometheus:
- ## Entry point used to expose metrics.
+ # -- Entry point used to expose metrics.
entryPoint: metrics
## Enable metrics on entry points. Default=true
# addEntryPointsLabels: false
@@ -404,11 +409,9 @@ metrics:
# ## This instructs the reporter to send metrics to the OpenTelemetry Collector using gRPC.
# grpc: true
-##
-## enable optional CRDs for Prometheus Operator
+## -- enable optional CRDs for Prometheus Operator
##
## Create a dedicated metrics service for use with ServiceMonitor
- ## When hub.enabled is set to true, it's not needed: it will use hub service.
# service:
# enabled: false
# labels: {}
@@ -455,6 +458,8 @@ metrics:
# summary: "Traefik Down"
# description: "{{ $labels.pod }} on {{ $labels.nodename }} is down"
+## Tracing
+# -- https://doc.traefik.io/traefik/observability/tracing/overview/
tracing: {}
# instana:
# localAgentHost: 127.0.0.1
@@ -497,20 +502,21 @@ tracing: {}
# secretToken: ""
# serviceEnvironment: ""
+# -- Global command arguments to be passed to all traefik's pods
globalArguments:
- "--global.checknewversion"
- "--global.sendanonymoususage"
#
# Configure Traefik static configuration
-# Additional arguments to be passed at Traefik's binary
+# -- Additional arguments to be passed at Traefik's binary
# All available options available on https://docs.traefik.io/reference/static-configuration/cli/
## Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"`
additionalArguments: []
# - "--providers.kubernetesingress.ingressclass=traefik-internal"
# - "--log.level=DEBUG"
-# Environment variables to be passed to Traefik's binary
+# -- Environment variables to be passed to Traefik's binary
env: []
# - name: SOME_VAR
# value: some-var-value
@@ -525,22 +531,20 @@ env: []
# name: secret-name
# key: secret-key
+# -- Environment variables to be passed to Traefik's binary from configMaps or secrets
envFrom: []
# - configMapRef:
# name: config-map-name
# - secretRef:
# name: secret-name
-# Configure ports
ports:
- # The name of this one can't be changed as it is used for the readiness and
- # liveness probes, but you can adjust its config to your liking
traefik:
port: 9000
- # Use hostPort if set.
+ # -- Use hostPort if set.
# hostPort: 9000
#
- # Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which
+ # -- Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which
# means it's listening on all your interfaces and all your IPs. You may want
# to set this value if you need traefik to listen on specific interface
# only.
@@ -558,27 +562,27 @@ ports:
# Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort.
#
- # You SHOULD NOT expose the traefik port on production deployments.
+ # -- You SHOULD NOT expose the traefik port on production deployments.
# If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress
expose: false
- # The exposed port for this service
+ # -- The exposed port for this service
exposedPort: 9000
- # The port protocol (TCP/UDP)
+ # -- The port protocol (TCP/UDP)
protocol: TCP
web:
- ## Enable this entrypoint as a default entrypoint. When a service doesn't explicity set an entrypoint it will only use this entrypoint.
+ ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicity set an entrypoint it will only use this entrypoint.
# asDefault: true
port: 8000
# hostPort: 8000
# containerPort: 8000
expose: true
exposedPort: 80
- ## Different target traefik port on the cluster, useful for IP type LB
+ ## -- Different target traefik port on the cluster, useful for IP type LB
# targetPort: 80
# The port protocol (TCP/UDP)
protocol: TCP
- # Use nodeport if set. This is useful if you have configured Traefik in a
+ # -- Use nodeport if set. This is useful if you have configured Traefik in a
# LoadBalancer.
# nodePort: 32080
# Port Redirections
@@ -596,20 +600,22 @@ ports:
# trustedIPs: []
# insecure: false
websecure:
- ## Enable this entrypoint as a default entrypoint. When a service doesn't explicity set an entrypoint it will only use this entrypoint.
+ ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicity set an entrypoint it will only use this entrypoint.
# asDefault: true
port: 8443
# hostPort: 8443
# containerPort: 8443
expose: true
exposedPort: 443
- ## Different target traefik port on the cluster, useful for IP type LB
+ ## -- Different target traefik port on the cluster, useful for IP type LB
# targetPort: 80
- ## The port protocol (TCP/UDP)
+ ## -- The port protocol (TCP/UDP)
protocol: TCP
# nodePort: 32443
+ ## -- Specify an application protocol. This may be used as a hint for a Layer 7 load balancer.
+ # appProtocol: https
#
- ## Enable HTTP/3 on the entrypoint
+ ## -- Enable HTTP/3 on the entrypoint
## Enabling it will also enable http3 experimental feature
## https://doc.traefik.io/traefik/routing/entrypoints/#http3
## There are known limitations when trying to listen on same ports for
@@ -619,12 +625,12 @@ ports:
enabled: false
# advertisedPort: 4443
#
- ## Trust forwarded headers information (X-Forwarded-*).
+ ## -- Trust forwarded headers information (X-Forwarded-*).
#forwardedHeaders:
# trustedIPs: []
# insecure: false
#
- ## Enable the Proxy Protocol header parsing for the entry point
+ ## -- Enable the Proxy Protocol header parsing for the entry point
#proxyProtocol:
# trustedIPs: []
# insecure: false
@@ -642,33 +648,33 @@ ports:
# - foo.example.com
# - bar.example.com
#
- # One can apply Middlewares on an entrypoint
+ # -- One can apply Middlewares on an entrypoint
# https://doc.traefik.io/traefik/middlewares/overview/
# https://doc.traefik.io/traefik/routing/entrypoints/#middlewares
- # /!\ It introduces here a link between your static configuration and your dynamic configuration /!\
+ # -- /!\ It introduces here a link between your static configuration and your dynamic configuration /!\
# It follows the provider naming convention: https://doc.traefik.io/traefik/providers/overview/#provider-namespace
# middlewares:
# - namespace-name1@kubernetescrd
# - namespace-name2@kubernetescrd
middlewares: []
metrics:
- # When using hostNetwork, use another port to avoid conflict with node exporter:
+ # -- When using hostNetwork, use another port to avoid conflict with node exporter:
# https://github.com/prometheus/prometheus/wiki/Default-port-allocations
port: 9100
# hostPort: 9100
# Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort.
#
- # You may not want to expose the metrics port on production deployments.
+ # -- You may not want to expose the metrics port on production deployments.
# If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress
expose: false
- # The exposed port for this service
+ # -- The exposed port for this service
exposedPort: 9100
- # The port protocol (TCP/UDP)
+ # -- The port protocol (TCP/UDP)
protocol: TCP
-# TLS Options are created as TLSOption CRDs
+# -- TLS Options are created as TLSOption CRDs
# https://doc.traefik.io/traefik/https/tls/#tls-options
# When using `labelSelector`, you'll need to set labels on tlsOption accordingly.
# Example:
@@ -684,7 +690,7 @@ ports:
# - CurveP384
tlsOptions: {}
-# TLS Store are created as TLSStore CRDs. This is useful if you want to set a default certificate
+# -- TLS Store are created as TLSStore CRDs. This is useful if you want to set a default certificate
# https://doc.traefik.io/traefik/https/tls/#default-certificate
# Example:
# tlsStore:
@@ -693,24 +699,22 @@ tlsOptions: {}
# secretName: tls-cert
tlsStore: {}
-# Options for the main traefik service, where the entrypoints traffic comes
-# from.
service:
enabled: true
- ## Single service is using `MixedProtocolLBService` feature gate.
- ## When set to false, it will create two Service, one for TCP and one for UDP.
+ ## -- Single service is using `MixedProtocolLBService` feature gate.
+ ## -- When set to false, it will create two Service, one for TCP and one for UDP.
single: true
type: LoadBalancer
- # Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config)
+ # -- Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config)
annotations: {}
- # Additional annotations for TCP service only
+ # -- Additional annotations for TCP service only
annotationsTCP: {}
- # Additional annotations for UDP service only
+ # -- Additional annotations for UDP service only
annotationsUDP: {}
- # Additional service labels (e.g. for filtering Service by custom labels)
+ # -- Additional service labels (e.g. for filtering Service by custom labels)
labels: {}
- # Additional entries here will be added to the service spec.
- # Cannot contain type, selector or ports entries.
+ # -- Additional entries here will be added to the service spec.
+ # -- Cannot contain type, selector or ports entries.
spec: {}
# externalTrafficPolicy: Cluster
# loadBalancerIP: "1.2.3.4"
@@ -718,6 +722,8 @@ service:
loadBalancerSourceRanges: []
# - 192.168.0.1/32
# - 172.16.0.0/16
+ ## -- Class of the load balancer implementation
+ # loadBalancerClass: service.k8s.aws/nlb
externalIPs: []
# - 1.2.3.4
## One of SingleStack, PreferDualStack, or RequireDualStack.
@@ -728,7 +734,7 @@ service:
# - IPv4
# - IPv6
##
- ## An additionnal and optional internal Service.
+ ## -- An additionnal and optional internal Service.
## Same parameters as external Service
# internal:
# type: ClusterIP
@@ -739,9 +745,8 @@ service:
# # externalIPs: []
# # ipFamilies: [ "IPv4","IPv6" ]
-## Create HorizontalPodAutoscaler object.
-##
autoscaling:
+ # -- Create HorizontalPodAutoscaler object.
enabled: false
# minReplicas: 1
# maxReplicas: 10
@@ -766,10 +771,10 @@ autoscaling:
# value: 1
# periodSeconds: 60
-# Enable persistence using Persistent Volume Claims
-# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
-# It can be used to store TLS certificates, see `storage` in certResolvers
persistence:
+ # -- Enable persistence using Persistent Volume Claims
+ # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+ # It can be used to store TLS certificates, see `storage` in certResolvers
enabled: false
name: data
# existingClaim: ""
@@ -779,8 +784,10 @@ persistence:
# volumeName: ""
path: /data
annotations: {}
- # subPath: "" # only mount a subpath of the Volume into the pod
+ # -- Only mount a subpath of the Volume into the pod
+ # subPath: ""
+# -- Certificates resolvers configuration
certResolvers: {}
# letsencrypt:
# # for challenge options cf. https://doc.traefik.io/traefik/https/acme/
@@ -802,13 +809,13 @@ certResolvers: {}
# # It has to match the path with a persistent volume
# storage: /data/acme.json
-# If hostNetwork is true, runs traefik in the host network namespace
+# -- If hostNetwork is true, runs traefik in the host network namespace
# To prevent unschedulabel pods due to port collisions, if hostNetwork=true
# and replicas>1, a pod anti-affinity is recommended and will be set if the
# affinity is left as default.
hostNetwork: false
-# Whether Role Based Access Control objects like roles and rolebindings should be created
+# -- Whether Role Based Access Control objects like roles and rolebindings should be created
rbac:
enabled: true
# If set to false, installs ClusterRole and ClusterRoleBinding so Traefik can be used across namespaces.
@@ -818,19 +825,20 @@ rbac:
# https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
# aggregateTo: [ "admin" ]
-# Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding
+# -- Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding
podSecurityPolicy:
enabled: false
-# The service account the pods will use to interact with the Kubernetes API
+# -- The service account the pods will use to interact with the Kubernetes API
serviceAccount:
# If set, an existing service account is used
# If not set, a service account is created automatically using the fullname template
name: ""
-# Additional serviceAccount annotations (e.g. for oidc authentication)
+# -- Additional serviceAccount annotations (e.g. for oidc authentication)
serviceAccountAnnotations: {}
+# -- The resources parameter defines CPU and memory requirements and limits for Traefik's containers.
resources: {}
# requests:
# cpu: "100m"
@@ -839,8 +847,8 @@ resources: {}
# cpu: "300m"
# memory: "150Mi"
-# This example pod anti-affinity forces the scheduler to put traefik pods
-# on nodes where no other traefik pods are scheduled.
+# -- This example pod anti-affinity forces the scheduler to put traefik pods
+# -- on nodes where no other traefik pods are scheduled.
# It should be used when hostNetwork: true to prevent port conflicts
affinity: {}
# podAntiAffinity:
@@ -851,11 +859,15 @@ affinity: {}
# app.kubernetes.io/instance: '{{ .Release.Name }}-{{ .Release.Namespace }}'
# topologyKey: kubernetes.io/hostname
+# -- nodeSelector is the simplest recommended form of node selection constraint.
nodeSelector: {}
+# -- Tolerations allow the scheduler to schedule pods with matching taints.
tolerations: []
+# -- You can use topology spread constraints to control
+# how Pods are spread across your cluster among failure-domains.
topologySpreadConstraints: []
-# # This example topologySpreadConstraints forces the scheduler to put traefik pods
-# # on nodes where no other traefik pods are scheduled.
+# This example topologySpreadConstraints forces the scheduler to put traefik pods
+# on nodes where no other traefik pods are scheduled.
# - labelSelector:
# matchLabels:
# app: '{{ template "traefik.name" . }}'
@@ -863,29 +875,33 @@ topologySpreadConstraints: []
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
-# Pods can have priority.
-# Priority indicates the importance of a Pod relative to other Pods.
+# -- Pods can have priority.
+# -- Priority indicates the importance of a Pod relative to other Pods.
priorityClassName: ""
-# Set the container security context
-# To run the container with ports below 1024 this will need to be adjust to run as root
+# -- Set the container security context
+# -- To run the container with ports below 1024 this will need to be adjust to run as root
securityContext:
capabilities:
drop: [ALL]
readOnlyRootFilesystem: true
podSecurityContext:
-# # /!\ When setting fsGroup, Kubernetes will recursively changes ownership and
-# # permissions for the contents of each volume to match the fsGroup. This can
-# # be an issue when storing sensitive content like TLS Certificates /!\
-# fsGroup: 65532
+ # /!\ When setting fsGroup, Kubernetes will recursively changes ownership and
+ # permissions for the contents of each volume to match the fsGroup. This can
+ # be an issue when storing sensitive content like TLS Certificates /!\
+ # fsGroup: 65532
+ # -- Specifies the policy for changing ownership and permissions of volume contents to match the fsGroup.
fsGroupChangePolicy: "OnRootMismatch"
+ # -- The ID of the group for all containers in the pod to run as.
runAsGroup: 65532
+ # -- Specifies whether the containers should run as a non-root user.
runAsNonRoot: true
+ # -- The ID of the user for all containers in the pod to run as.
runAsUser: 65532
#
-# Extra objects to deploy (value evaluated as a template)
+# -- Extra objects to deploy (value evaluated as a template)
#
# In some cases, it can avoid the need for additional, extended or adhoc deployments.
# See #595 for more details and traefik/tests/values/extra.yaml for example.
@@ -895,5 +911,5 @@ extraObjects: []
# It will not affect optional CRDs such as `ServiceMonitor` and `PrometheusRules`
# namespaceOverride: traefik
#
-## This will override the default app.kubernetes.io/instance label for all Objects.
+## -- This will override the default app.kubernetes.io/instance label for all Objects.
# instanceLabelOverride: traefik
```
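
Taken together, the user-facing additions in this release include `commonLabels`, `service.loadBalancerClass` and `ports.websecure.appProtocol`. A minimal override sketch exercising them; the label value and load-balancer class below are illustrative only, not defaults:

```yaml
# Sketch only; label value and load-balancer class are illustrative
commonLabels:
  team: platform                          # added to every resource the chart renders
service:
  loadBalancerClass: service.k8s.aws/nlb  # select the LB implementation by class
ports:
  websecure:
    appProtocol: https                    # L7 protocol hint set on the Service port
```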
## 23.0.1 ![AppVersion: v2.10.1](https://img.shields.io/static/v1?label=AppVersion&message=v2.10.1&color=success&logo=) ![Kubernetes: >=1.16.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.16.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm)
-**Release date:** 2023-04-27
+**Release date:** 2023-04-28
-* ⬆️ Upgrade traefik Docker tag to v2.10.1
+* fix: ⬆️ Upgrade traefik Docker tag to v2.10.1
## 23.0.0 ![AppVersion: v2.10.0](https://img.shields.io/static/v1?label=AppVersion&message=v2.10.0&color=success&logo=) ![Kubernetes: >=1.16.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.16.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm)


@@ -1,6 +1,11 @@
annotations:
-  artifacthub.io/changes: |
-    - "⬆️ Upgrade traefik Docker tag to v2.10.1"
+  artifacthub.io/changes: "- \"release: \U0001F680 publish v23.1.0\"\n- \"feat:
+    add a warning when labelSelector don't match\"\n- \"feat: add optional `appProtocol`
+    field on Service ports\"\n- \"feat: remove Traefik Hub v1 integration\"\n- \"feat:
+    allow specifying service loadBalancerClass\"\n- \"feat: common labels for all
+    resources\"\n- \"fix: \U0001F41B use k8s version for hpa api version\"\n- \"fix:
+    \U0001F41B http3 support on traefik v3\"\n- \"fix: use `targetPort` instead of
+    `port` on ServiceMonitor\"\n- \"doc: added values README via helm-docs cli\"\n"
  catalog.cattle.io/certified: partner
  catalog.cattle.io/display-name: Traefik Proxy
  catalog.cattle.io/kube-version: '>=1.16.0-0'
@@ -31,4 +36,4 @@ sources:
- https://github.com/traefik/traefik
- https://github.com/traefik/traefik-helm-chart
type: application
-version: 23.0.1
+version: 23.1.0


@@ -2,19 +2,22 @@
This document outlines the guidelines for developing, managing and extending the Traefik helm chart.
+This Helm Chart is documented using field description from comments with [helm-docs](https://github.com/norwoodj/helm-docs).
Optionallity
All non-critical features (Features not mandatory to starting Traefik) in the helm chart must be optional. All non-critical features should be disabled (commented out) in the values.yaml file. All optional non-critical features should be disabled (commented out) in the values.yaml file, and have a comment # (Optional) in the line above. This allows minimal configuration, and ease of extension.
-## Critical Feature Example
+## Feature Example
```yaml
image:
-  name: traefik
+  # -- Traefik image host registry
+  registry: docker.io
```
-This feature is critical, and therefore is defined clearly in the values.yaml file.
+This feature is expected and therefore is defined clearly in the values.yaml file.
-## Non-Critical Feature Example
+## Optional Feature Example
```yaml
# storage:
@@ -22,7 +25,7 @@ This feature is critical, and therefore is defined clearly in the values.yaml fi
# type: emptyDir
```
-This feature is non-critical, and therefore is commented out by default in the values.yaml file.
+This feature is optional, non-critical, and therefore is commented out by default in the values.yaml file.
To allow this, template blocks that use this need to recursively test for existence of values before using them:
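
A minimal sketch of such a guarded block, assuming the optional `storage` value from the example above (illustrative, not taken from the chart sources):

```yaml
{{- if .Values.storage }}
{{- if .Values.storage.enabled }}
# Rendered only when the optional value exists AND is enabled
- name: storage
  emptyDir: {}
{{- end }}
{{- end }}
```

Nesting the `if`s keeps the template from dereferencing `.Values.storage.enabled` when `storage` is absent entirely.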
@@ -87,7 +90,3 @@ There should be an empty commented line between each primary key in the values.y
## Values YAML Design
The values.yaml file is designed to be user-friendly. It does not have to resemble the templated configuration if it is not conducive. Similarly, value names to not have to correspond to fields in the tempate if it is not condusive.
-## Comments
-The values.yaml file should not contain comments or explainations of what options are, or what values are available. The values table in the README file is for this purpose.


@@ -0,0 +1,165 @@
# traefik
![Version: 23.1.0](https://img.shields.io/badge/Version-23.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v2.10.1](https://img.shields.io/badge/AppVersion-v2.10.1-informational?style=flat-square)
A Traefik based Kubernetes ingress controller
**Homepage:** <https://traefik.io/>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| emilevauge | <emile@vauge.com> | |
| dtomcej | <daniel.tomcej@gmail.com> | |
| ldez | <ldez@traefik.io> | |
| mloiseleur | <michel.loiseleur@traefik.io> | |
| charlie-haley | <charlie.haley@traefik.io> | |
## Source Code
* <https://github.com/traefik/traefik>
* <https://github.com/traefik/traefik-helm-chart>
## Requirements
Kubernetes: `>=1.16.0-0`
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| additionalArguments | list | `[]` | Additional arguments to be passed at Traefik's binary All available options available on https://docs.traefik.io/reference/static-configuration/cli/ # Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"` |
| additionalVolumeMounts | list | `[]` | Additional volumeMounts to add to the Traefik container |
| affinity | object | `{}` | on nodes where no other traefik pods are scheduled. It should be used when hostNetwork: true to prevent port conflicts |
| autoscaling.enabled | bool | `false` | Create HorizontalPodAutoscaler object. |
| certResolvers | object | `{}` | Certificates resolvers configuration |
| commonLabels | object | `{}` | Add additional label to all resources |
| deployment.additionalContainers | list | `[]` | Additional containers (e.g. for metric offloading sidecars) |
| deployment.additionalVolumes | list | `[]` | Additional volumes available for use with initContainers and additionalContainers |
| deployment.annotations | object | `{}` | Additional deployment annotations (e.g. for jaeger-operator sidecar injection) |
| deployment.dnsConfig | object | `{}` | Custom pod DNS policy. Apply if `hostNetwork: true` dnsPolicy: ClusterFirstWithHostNet |
| deployment.enabled | bool | `true` | Enable deployment |
| deployment.imagePullSecrets | list | `[]` | Additional imagePullSecrets |
| deployment.initContainers | list | `[]` | Additional initContainers (e.g. for setting file permission as shown below) |
| deployment.kind | string | `"Deployment"` | Deployment or DaemonSet |
| deployment.labels | object | `{}` | Additional deployment labels (e.g. for filtering deployment by custom labels) |
| deployment.lifecycle | object | `{}` | Pod lifecycle actions |
| deployment.minReadySeconds | int | `0` | The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available |
| deployment.podAnnotations | object | `{}` | Additional pod annotations (e.g. for mesh injection or prometheus scraping) |
| deployment.podLabels | object | `{}` | Additional Pod labels (e.g. for filtering Pod by custom labels) |
| deployment.replicas | int | `1` | Number of pods of the deployment (only applies when kind == Deployment) |
| deployment.shareProcessNamespace | bool | `false` | Use process namespace sharing |
| deployment.terminationGracePeriodSeconds | int | `60` | Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down |
| env | list | `[]` | Environment variables to be passed to Traefik's binary |
| envFrom | list | `[]` | Environment variables to be passed to Traefik's binary from configMaps or secrets |
| experimental.kubernetesGateway.enabled | bool | `false` | Enable traefik experimental GatewayClass CRD |
| experimental.kubernetesGateway.gateway.enabled | bool | `true` | Enable traefik regular kubernetes gateway |
| experimental.plugins.enabled | bool | `false` | Enable traefik experimental plugins |
| experimental.v3.enabled | bool | `false` | Enable traefik version 3 |
| extraObjects | list | `[]` | Extra objects to deploy (value evaluated as a template) In some cases, it can avoid the need for additional, extended or adhoc deployments. See #595 for more details and traefik/tests/values/extra.yaml for example. |
| globalArguments | list | `["--global.checknewversion","--global.sendanonymoususage"]` | Global command arguments to be passed to all traefik's pods |
| hostNetwork | bool | `false` | If hostNetwork is true, runs traefik in the host network namespace To prevent unschedulabel pods due to port collisions, if hostNetwork=true and replicas>1, a pod anti-affinity is recommended and will be set if the affinity is left as default. |
| image.pullPolicy | string | `"IfNotPresent"` | Traefik image pull policy |
| image.registry | string | `"docker.io"` | Traefik image host registry |
| image.repository | string | `"traefik"` | Traefik image repository |
| image.tag | string | `""` | defaults to appVersion |
| ingressClass | object | `{"enabled":true,"isDefaultClass":true}` | Create a default IngressClass for Traefik |
| ingressRoute.dashboard.annotations | object | `{}` | Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) |
| ingressRoute.dashboard.enabled | bool | `true` | Create an IngressRoute for the dashboard |
| ingressRoute.dashboard.entryPoints | list | `["traefik"]` | Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure). By default, it's using traefik entrypoint, which is not exposed. /!\ Do not expose your dashboard without any protection over the internet /!\ |
| ingressRoute.dashboard.labels | object | `{}` | Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) |
| ingressRoute.dashboard.matchRule | string | `"PathPrefix(`/dashboard`) || PathPrefix(`/api`)"` | The router match rule used for the dashboard ingressRoute |
| ingressRoute.dashboard.middlewares | list | `[]` | Additional ingressRoute middlewares (e.g. for authentication) |
| ingressRoute.dashboard.tls | object | `{}` | TLS options (e.g. secret containing certificate) |
| livenessProbe.failureThreshold | int | `3` | The number of consecutive failures allowed before considering the probe as failed. |
| livenessProbe.initialDelaySeconds | int | `2` | The number of seconds to wait before starting the first probe. |
| livenessProbe.periodSeconds | int | `10` | The number of seconds to wait between consecutive probes. |
| livenessProbe.successThreshold | int | `1` | The minimum consecutive successes required to consider the probe successful. |
| livenessProbe.timeoutSeconds | int | `2` | The number of seconds to wait for a probe response before considering it as failed. |
| logs.access.enabled | bool | `false` | To enable access logs |
| logs.access.fields.general.defaultmode | string | `"keep"` | Available modes: keep, drop, redact. |
| logs.access.fields.general.names | object | `{}` | Names of the fields to limit. |
| logs.access.fields.headers.defaultmode | string | `"drop"` | Available modes: keep, drop, redact. |
| logs.access.fields.headers.names | object | `{}` | Names of the headers to limit. |
| logs.access.filters | object | `{}` | https://docs.traefik.io/observability/access-logs/#filtering |
| logs.general.level | string | `"ERROR"` | Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO. |
| metrics.prometheus.entryPoint | string | `"metrics"` | Entry point used to expose metrics. |
| nodeSelector | object | `{}` | nodeSelector is the simplest recommended form of node selection constraint. |
| persistence.accessMode | string | `"ReadWriteOnce"` | |
| persistence.annotations | object | `{}` | |
| persistence.enabled | bool | `false` | Enable persistence using Persistent Volume Claims ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ It can be used to store TLS certificates, see `storage` in certResolvers |
| persistence.name | string | `"data"` | |
| persistence.path | string | `"/data"` | |
| persistence.size | string | `"128Mi"` | |
| podDisruptionBudget | object | `{"enabled":false}` | Pod disruption budget |
| podSecurityContext.fsGroupChangePolicy | string | `"OnRootMismatch"` | Specifies the policy for changing ownership and permissions of volume contents to match the fsGroup. |
| podSecurityContext.runAsGroup | int | `65532` | The ID of the group for all containers in the pod to run as. |
| podSecurityContext.runAsNonRoot | bool | `true` | Specifies whether the containers should run as a non-root user. |
| podSecurityContext.runAsUser | int | `65532` | The ID of the user for all containers in the pod to run as. |
| podSecurityPolicy | object | `{"enabled":false}` | Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding |
| ports.metrics.expose | bool | `false` | You may not want to expose the metrics port on production deployments. If you want to access it from outside of your cluster, use `kubectl port-forward` or create a secure ingress |
| ports.metrics.exposedPort | int | `9100` | The exposed port for this service |
| ports.metrics.port | int | `9100` | When using hostNetwork, use another port to avoid conflict with node exporter: https://github.com/prometheus/prometheus/wiki/Default-port-allocations |
| ports.metrics.protocol | string | `"TCP"` | The port protocol (TCP/UDP) |
| ports.traefik.expose | bool | `false` | You SHOULD NOT expose the traefik port on production deployments. If you want to access it from outside of your cluster, use `kubectl port-forward` or create a secure ingress |
| ports.traefik.exposedPort | int | `9000` | The exposed port for this service |
| ports.traefik.port | int | `9000` | |
| ports.traefik.protocol | string | `"TCP"` | The port protocol (TCP/UDP) |
| ports.web.expose | bool | `true` | |
| ports.web.exposedPort | int | `80` | |
| ports.web.port | int | `8000` | |
| ports.web.protocol | string | `"TCP"` | |
| ports.websecure.expose | bool | `true` | |
| ports.websecure.exposedPort | int | `443` | |
| ports.websecure.http3.enabled | bool | `false` | |
| ports.websecure.middlewares | list | `[]` | /!\ It introduces here a link between your static configuration and your dynamic configuration /!\ It follows the provider naming convention: https://doc.traefik.io/traefik/providers/overview/#provider-namespace middlewares: - namespace-name1@kubernetescrd - namespace-name2@kubernetescrd |
| ports.websecure.port | int | `8443` | |
| ports.websecure.protocol | string | `"TCP"` | |
| ports.websecure.tls.certResolver | string | `""` | |
| ports.websecure.tls.domains | list | `[]` | |
| ports.websecure.tls.enabled | bool | `true` | |
| ports.websecure.tls.options | string | `""` | |
| priorityClassName | string | `""` | Priority indicates the importance of a Pod relative to other Pods. |
| providers.kubernetesCRD.allowCrossNamespace | bool | `false` | Allows IngressRoute to reference resources in namespace other than theirs |
| providers.kubernetesCRD.allowEmptyServices | bool | `false` | Allows to return 503 when there is no endpoints available |
| providers.kubernetesCRD.allowExternalNameServices | bool | `false` | Allows to reference ExternalName services in IngressRoute |
| providers.kubernetesCRD.enabled | bool | `true` | Load Kubernetes IngressRoute provider |
| providers.kubernetesCRD.namespaces | list | `[]` | Array of namespaces to watch. If left empty, Traefik watches all namespaces. |
| providers.kubernetesIngress.allowEmptyServices | bool | `false` | Allows to return 503 when there is no endpoints available |
| providers.kubernetesIngress.allowExternalNameServices | bool | `false` | Allows to reference ExternalName services in Ingress |
| providers.kubernetesIngress.enabled | bool | `true` | Load Kubernetes IngressRoute provider |
| providers.kubernetesIngress.namespaces | list | `[]` | Array of namespaces to watch. If left empty, Traefik watches all namespaces. |
| providers.kubernetesIngress.publishedService.enabled | bool | `false` | |
| rbac | object | `{"enabled":true,"namespaced":false}` | Whether Role Based Access Control objects like roles and rolebindings should be created |
| readinessProbe.failureThreshold | int | `1` | The number of consecutive failures allowed before considering the probe as failed. |
| readinessProbe.initialDelaySeconds | int | `2` | The number of seconds to wait before starting the first probe. |
| readinessProbe.periodSeconds | int | `10` | The number of seconds to wait between consecutive probes. |
| readinessProbe.successThreshold | int | `1` | The minimum consecutive successes required to consider the probe successful. |
| readinessProbe.timeoutSeconds | int | `2` | The number of seconds to wait for a probe response before considering it as failed. |
| resources | object | `{}` | The resources parameter defines CPU and memory requirements and limits for Traefik's containers. |
| securityContext | object | `{"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true}` | To run the container with ports below 1024 this will need to be adjust to run as root |
| service.annotations | object | `{}` | Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config) |
| service.annotationsTCP | object | `{}` | Additional annotations for TCP service only |
| service.annotationsUDP | object | `{}` | Additional annotations for UDP service only |
| service.enabled | bool | `true` | |
| service.externalIPs | list | `[]` | |
| service.labels | object | `{}` | Additional service labels (e.g. for filtering Service by custom labels) |
| service.loadBalancerSourceRanges | list | `[]` | |
| service.single | bool | `true` | |
| service.spec | object | `{}` | Cannot contain type, selector or ports entries. |
| service.type | string | `"LoadBalancer"` | |
| serviceAccount | object | `{"name":""}` | The service account the pods will use to interact with the Kubernetes API |
| serviceAccountAnnotations | object | `{}` | Additional serviceAccount annotations (e.g. for oidc authentication) |
| tlsOptions | object | `{}` | TLS Options are created as TLSOption CRDs https://doc.traefik.io/traefik/https/tls/#tls-options When using `labelSelector`, you'll need to set labels on tlsOption accordingly. Example: tlsOptions: default: labels: {} sniStrict: true preferServerCipherSuites: true customOptions: labels: {} curvePreferences: - CurveP521 - CurveP384 |
| tlsStore | object | `{}` | TLS Store are created as TLSStore CRDs. This is useful if you want to set a default certificate https://doc.traefik.io/traefik/https/tls/#default-certificate Example: tlsStore: default: defaultCertificate: secretName: tls-cert |
| tolerations | list | `[]` | Tolerations allow the scheduler to schedule pods with matching taints. |
| topologySpreadConstraints | list | `[]` | You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains. |
| tracing | object | `{}` | https://doc.traefik.io/traefik/observability/tracing/overview/ |
| updateStrategy.rollingUpdate.maxSurge | int | `1` | |
| updateStrategy.rollingUpdate.maxUnavailable | int | `0` | |
| updateStrategy.type | string | `"RollingUpdate"` | Customize updateStrategy: RollingUpdate or OnDelete |
| volumes | list | `[]` | Add volumes to the traefik pod. The volume name will be passed to tpl. This can be used to mount a cert pair or a configmap that holds a config.toml file. After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg: `additionalArguments: - "--providers.file.filename=/config/dynamic.toml" - "--ping" - "--ping.entrypoint=web"` |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
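
The commented defaults in the table translate directly into an override file. A small sketch built only from keys documented above; the chosen values are illustrative:

```yaml
# Hypothetical override file assembled from documented keys
deployment:
  kind: DaemonSet        # table documents "Deployment or DaemonSet"
logs:
  access:
    enabled: true        # documented default is false
additionalArguments:
  - "--providers.kubernetesingress.ingressclass=traefik-internal"
  - "--log.level=DEBUG"
```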


@@ -1,23 +1,7 @@
-Traefik Proxy {{ .Values.image.tag | default .Chart.AppVersion }} has been deployed successfully
-on {{ template "traefik.namespace" . }} namespace !
+Traefik Proxy {{ .Values.image.tag | default .Chart.AppVersion }} has been deployed successfully on {{ template "traefik.namespace" . }} namespace !
-{{- if .Values.hub.enabled }}
-{{- if coalesce (ne (include "traefik.namespace" .) "hub-agent") .Values.hub.tunnelPort (ne (.Values.ports.metrics.port | int) 9100) }}
-Traefik Hub integration is enabled ! With your specific parameters,
-`metricsURL`, `tunnelHost` and `tunnelPort` needs to be set accordingly
-on hub-agent Helm Chart. Based on this Chart, it should be:
---set controllerDeployment.traefik.metricsURL=http://traefik-hub.{{ template "traefik.namespace" . }}.svc.cluster.local:{{ .Values.ports.metrics.port }}/metrics
---set tunnelDeployment.traefik.tunnelHost=traefik-hub.{{ template "traefik.namespace" . }}.svc.cluster.local
---set tunnelDeployment.traefik.tunnelPort={{ default 9901 .Values.hub.tunnelPort }}
-See https://doc.traefik.io/traefik-hub/install/#traefik-hub-agent-install-with-helmchart
-{{- end }}
-{{- end }}
{{- if .Values.persistence }}
{{- if and .Values.persistence.enabled (empty .Values.deployment.initContainer)}}
@@ -28,3 +12,25 @@ more info. 🚨
{{- end }}
{{- end }}
+{{- with .Values.providers.kubernetesCRD.labelSelector }}
+{{- $labelsApplied := include "traefik.labels" $ }}
+{{- $labelSelectors := regexSplit "," . -1 }}
+{{- range $labelSelectors }}
+{{- $labelSelectorRaw := regexSplit "=" . -1 }}
+{{- $labelSelector := printf "%s: %s" (first $labelSelectorRaw) (last $labelSelectorRaw) }}
+{{- if not (contains $labelSelector $labelsApplied) }}
+🚨 Resources populated with this chart don't match with labelSelector `{{.}}` applied on kubernetesCRD provider 🚨
+{{- end }}
+{{- end }}
+{{- end }}
+{{- with .Values.providers.kubernetesIngress.labelSelector }}
+{{- $labelsApplied := include "traefik.labels" $ }}
+{{- $labelSelectors := regexSplit "," . -1 }}
+{{- range $labelSelectors }}
+{{- $labelSelectorRaw := regexSplit "=" . -1 }}
+{{- $labelSelector := printf "%s: %s" (first $labelSelectorRaw) (last $labelSelectorRaw) }}
+{{- if not (contains $labelSelector $labelsApplied) }}
+🚨 Resources populated with this chart don't match with labelSelector `{{.}}` applied on kubernetesIngress provider 🚨
+{{- end }}
+{{- end }}
+{{- end }}
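
Concretely, with `labelSelector: environment=production` the block above splits the selector on `,` and `=`, renders the string `environment: production`, and emits the 🚨 warning unless that line occurs in the chart's computed labels. The new `commonLabels` value is one way to make them agree; a sketch with illustrative values:

```yaml
# Hypothetical values making the selector and the rendered labels agree
providers:
  kubernetesCRD:
    labelSelector: environment=production
commonLabels:
  environment: production
```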


@@ -58,6 +58,9 @@ app.kubernetes.io/instance: {{ template "traefik.instance-name" . }}
{{ include "traefik.labelselector" . }}
helm.sh/chart: {{ template "traefik.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- with .Values.commonLabels }}
+{{ toYaml . }}
+{{- end }}
{{- end }}
{{/*


@ -102,11 +102,6 @@
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if .Values.hub.enabled }}
- name: "traefikhub-tunl"
containerPort: {{ default 9901 .Values.hub.tunnelPort }}
protocol: "TCP"
{{- end }}
{{- with .Values.securityContext }} {{- with .Values.securityContext }}
securityContext: securityContext:
{{- toYaml . | nindent 10 }} {{- toYaml . | nindent 10 }}
@ -248,10 +243,10 @@
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if (or .Values.metrics.prometheus .Values.hub.enabled) }} {{- if (.Values.metrics.prometheus) }}
- "--metrics.prometheus=true" - "--metrics.prometheus=true"
- "--metrics.prometheus.entrypoint={{ .Values.metrics.prometheus.entryPoint }}" - "--metrics.prometheus.entrypoint={{ .Values.metrics.prometheus.entryPoint }}"
{{- if (or (eq (.Values.metrics.prometheus.addRoutersLabels | toString) "true") .Values.hub.enabled) }} {{- if (eq (.Values.metrics.prometheus.addRoutersLabels | toString) "true") }}
- "--metrics.prometheus.addRoutersLabels=true" - "--metrics.prometheus.addRoutersLabels=true"
{{- end }} {{- end }}
{{- if ne .Values.metrics.prometheus.addEntryPointsLabels nil }} {{- if ne .Values.metrics.prometheus.addEntryPointsLabels nil }}
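With the `hub.enabled` shortcut removed, these flags are now driven by the metrics values alone; a hedged sketch:

metrics:
  prometheus:
    entryPoint: metrics
    addRoutersLabels: true    # renders --metrics.prometheus.addRoutersLabels=true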
@ -483,10 +478,10 @@
{{- if .Values.providers.kubernetesCRD.ingressClass }} {{- if .Values.providers.kubernetesCRD.ingressClass }}
- "--providers.kubernetescrd.ingressClass={{ .Values.providers.kubernetesCRD.ingressClass }}" - "--providers.kubernetescrd.ingressClass={{ .Values.providers.kubernetesCRD.ingressClass }}"
{{- end }} {{- end }}
{{- if (or .Values.providers.kubernetesCRD.allowCrossNamespace .Values.hub.enabled) }} {{- if .Values.providers.kubernetesCRD.allowCrossNamespace }}
- "--providers.kubernetescrd.allowCrossNamespace=true" - "--providers.kubernetescrd.allowCrossNamespace=true"
{{- end }} {{- end }}
{{- if (or .Values.providers.kubernetesCRD.allowExternalNameServices .Values.hub.enabled) }} {{- if .Values.providers.kubernetesCRD.allowExternalNameServices }}
- "--providers.kubernetescrd.allowExternalNameServices=true" - "--providers.kubernetescrd.allowExternalNameServices=true"
{{- end }} {{- end }}
{{- if .Values.providers.kubernetesCRD.allowEmptyServices }} {{- if .Values.providers.kubernetesCRD.allowEmptyServices }}
@ -495,7 +490,7 @@
{{- end }} {{- end }}
{{- if .Values.providers.kubernetesIngress.enabled }} {{- if .Values.providers.kubernetesIngress.enabled }}
- "--providers.kubernetesingress" - "--providers.kubernetesingress"
{{- if (or .Values.providers.kubernetesIngress.allowExternalNameServices .Values.hub.enabled) }} {{- if .Values.providers.kubernetesIngress.allowExternalNameServices }}
- "--providers.kubernetesingress.allowExternalNameServices=true" - "--providers.kubernetesingress.allowExternalNameServices=true"
{{- end }} {{- end }}
{{- if .Values.providers.kubernetesIngress.allowEmptyServices }} {{- if .Values.providers.kubernetesIngress.allowEmptyServices }}
@ -555,8 +550,10 @@
{{- end }} {{- end }}
{{- if $config.http3 }} {{- if $config.http3 }}
{{- if $config.http3.enabled }} {{- if $config.http3.enabled }}
{{- if semverCompare "<3.0.0-0" (default $.Chart.AppVersion $.Values.image.tag)}}
- "--experimental.http3=true" - "--experimental.http3=true"
{{- if semverCompare ">=2.6.0" (default $.Chart.AppVersion $.Values.image.tag)}} {{- end }}
{{- if semverCompare ">=2.6.0-0" (default $.Chart.AppVersion $.Values.image.tag)}}
{{- if $config.http3.advertisedPort }} {{- if $config.http3.advertisedPort }}
- "--entrypoints.{{ $entrypoint }}.http3.advertisedPort={{ $config.http3.advertisedPort }}" - "--entrypoints.{{ $entrypoint }}.http3.advertisedPort={{ $config.http3.advertisedPort }}"
{{- else }} {{- else }}
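To illustrate the new version gates, a values sketch (the tag is hypothetical): a 2.x image gets both `--experimental.http3=true` and the advertised-port flag, while a 3.x image skips the experimental flag.

image:
  tag: "v2.10.1"    # semver-compared against <3.0.0-0 and >=2.6.0-0
ports:
  websecure:
    http3:
      enabled: true
      advertisedPort: 4443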
@ -636,29 +633,6 @@
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if .Values.hub.enabled }}
- "--hub"
{{- if .Values.hub.tunnelPort }}
- --entrypoints.traefikhub-tunl.address=:{{.Values.hub.tunnelPort}}
{{- end }}
{{- with .Values.hub.tls }}
{{- if (and .insecure (coalesce .ca .cert .key)) }}
{{- fail "ERROR: You cannot specify insecure and certs on TLS for Traefik Hub at the same time" }}
{{- end }}
{{- if .insecure }}
- "--hub.tls.insecure=true"
{{- end }}
{{- if .ca }}
- "--hub.tls.ca={{ .ca }}"
{{- end }}
{{- if .cert }}
- "--hub.tls.cert={{ .cert }}"
{{- end }}
{{- if .key }}
- "--hub.tls.key={{ .key }}"
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.additionalArguments }} {{- with .Values.additionalArguments }}
{{- range . }} {{- range . }}
- {{ . | quote }} - {{ . | quote }}

View File

@ -18,5 +18,8 @@ app.kubernetes.io/component: metrics
{{ include "traefik.metricslabelselector" . }} {{ include "traefik.metricslabelselector" . }}
helm.sh/chart: {{ template "traefik.chart" . }} helm.sh/chart: {{ template "traefik.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.commonLabels }}
{{ toYaml . }}
{{- end }}
{{- end }} {{- end }}

View File

@ -9,6 +9,9 @@
{{- define "traefik.service-spec" -}} {{- define "traefik.service-spec" -}}
{{- $type := default "LoadBalancer" .Values.service.type }} {{- $type := default "LoadBalancer" .Values.service.type }}
type: {{ $type }} type: {{ $type }}
{{- with .Values.service.loadBalancerClass }}
loadBalancerClass: {{ . }}
{{- end}}
{{- with .Values.service.spec }} {{- with .Values.service.spec }}
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
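A minimal sketch of the new `loadBalancerClass` pass-through; the class name mirrors the commented example added to values.yaml below and assumes a matching controller (e.g. the AWS Load Balancer Controller) is installed:

service:
  type: LoadBalancer
  loadBalancerClass: service.k8s.aws/nlb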
@ -43,6 +46,9 @@
{{- if $config.nodePort }} {{- if $config.nodePort }}
nodePort: {{ $config.nodePort }} nodePort: {{ $config.nodePort }}
{{- end }} {{- end }}
{{- if $config.appProtocol }}
appProtocol: {{ $config.appProtocol }}
{{- end }}
{{- end }} {{- end }}
{{- if $config.http3 }} {{- if $config.http3 }}
{{- if $config.http3.enabled }} {{- if $config.http3.enabled }}
@ -54,6 +60,9 @@
{{- if $config.nodePort }} {{- if $config.nodePort }}
nodePort: {{ $config.nodePort }} nodePort: {{ $config.nodePort }}
{{- end }} {{- end }}
{{- if $config.appProtocol }}
appProtocol: {{ $config.appProtocol }}
{{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
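For example, the new `appProtocol` pass-through can advertise HTTPS on the websecure port, mirroring the commented hint added to values.yaml below:

ports:
  websecure:
    appProtocol: https    # emitted on the Service port as a Layer 7 hint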

View File

@ -6,6 +6,8 @@ kind: Gateway
metadata: metadata:
name: traefik-gateway name: traefik-gateway
namespace: {{ default (include "traefik.namespace" .) .Values.experimental.kubernetesGateway.namespace }} namespace: {{ default (include "traefik.namespace" .) .Values.experimental.kubernetesGateway.namespace }}
labels:
{{- include "traefik.labels" . | nindent 4 }}
{{- with .Values.experimental.kubernetesGateway.gateway.annotations }} {{- with .Values.experimental.kubernetesGateway.gateway.annotations }}
annotations: annotations:
{{- toYaml . | nindent 4 }} {{- toYaml . | nindent 4 }}

View File

@ -4,6 +4,8 @@ apiVersion: gateway.networking.k8s.io/v1alpha2
kind: GatewayClass kind: GatewayClass
metadata: metadata:
name: traefik name: traefik
labels:
{{- include "traefik.labels" . | nindent 4 }}
spec: spec:
controllerName: traefik.io/gateway-controller controllerName: traefik.io/gateway-controller
{{- end }} {{- end }}

View File

@ -4,14 +4,10 @@
{{- fail "ERROR: maxReplicas is required on HPA" }} {{- fail "ERROR: maxReplicas is required on HPA" }}
{{- end }} {{- end }}
{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }} {{- if semverCompare ">=1.23.0-0" .Capabilities.KubeVersion.Version }}
apiVersion: autoscaling/v2 apiVersion: autoscaling/v2
{{- else if .Capabilities.APIVersions.Has "autoscaling/v2beta2" }}
apiVersion: autoscaling/v2beta2
{{- else if .Capabilities.APIVersions.Has "autoscaling/v2beta1" }}
apiVersion: autoscaling/v2beta1
{{- else }} {{- else }}
{{- fail "ERROR: You must have at least autoscaling/v2beta1 to use HorizontalPodAutoscaler" }} apiVersion: autoscaling/v2beta2
{{- end }} {{- end }}
kind: HorizontalPodAutoscaler kind: HorizontalPodAutoscaler
metadata: metadata:
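A minimal autoscaling sketch that exercises this template (the replica counts are hypothetical): on Kubernetes >= 1.23 it renders apiVersion autoscaling/v2, otherwise autoscaling/v2beta2.

autoscaling:
  enabled: true
  minReplicas: 1
  maxReplicas: 10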

View File

@ -1,25 +0,0 @@
{{- if .Values.hub.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: traefik-hub
namespace: {{ template "traefik.namespace" . }}
{{- template "traefik.service-metadata" . }}
spec:
type: ClusterIP
selector:
{{- include "traefik.labelselector" . | nindent 4 }}
ports:
- port: {{ .Values.ports.metrics.port }}
name: "metrics"
targetPort: metrics
protocol: TCP
{{- if .Values.ports.metrics.nodePort }}
nodePort: {{ .Values.ports.metrics.nodePort }}
{{- end }}
- port: {{ default 9901 .Values.hub.tunnelPort }}
name: "traefikhub-tunl"
targetPort: traefikhub-tunl
protocol: TCP
{{- end -}}

View File

@ -1,6 +1,6 @@
{{- if .Values.metrics.prometheus }} {{- if .Values.metrics.prometheus }}
{{- if .Values.metrics.prometheus.service }} {{- if .Values.metrics.prometheus.service }}
{{- if (and (.Values.metrics.prometheus.service).enabled (not .Values.hub.enabled)) -}} {{- if (.Values.metrics.prometheus.service).enabled -}}
{{- $fullname := include "traefik.fullname" . }} {{- $fullname := include "traefik.fullname" . }}
{{- if ge (len $fullname) 50 }} {{- if ge (len $fullname) 50 }}

View File

@ -22,7 +22,7 @@
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{- if and (eq $exposedPorts false) (not .Values.hub.enabled) -}} {{- if (eq $exposedPorts false) -}}
{{- fail "You need to expose at least one port or set enabled=false to service" -}} {{- fail "You need to expose at least one port or set enabled=false to service" -}}
{{- end -}} {{- end -}}

View File

@ -13,7 +13,7 @@ metadata:
namespace: {{ . }} namespace: {{ . }}
{{- end }} {{- end }}
labels: labels:
{{- if (and (.Values.metrics.prometheus.service).enabled (not .Values.hub.enabled)) }} {{- if (.Values.metrics.prometheus.service).enabled }}
{{- include "traefik.metricsservicelabels" . | nindent 4 }} {{- include "traefik.metricsservicelabels" . | nindent 4 }}
{{- else }} {{- else }}
{{- include "traefik.labels" . | nindent 4 }} {{- include "traefik.labels" . | nindent 4 }}
@ -24,7 +24,7 @@ metadata:
spec: spec:
jobLabel: {{ .Values.metrics.prometheus.serviceMonitor.jobLabel | default .Release.Name }} jobLabel: {{ .Values.metrics.prometheus.serviceMonitor.jobLabel | default .Release.Name }}
endpoints: endpoints:
- port: metrics - targetPort: metrics
path: /{{ .Values.metrics.prometheus.entryPoint }} path: /{{ .Values.metrics.prometheus.entryPoint }}
{{- with .Values.metrics.prometheus.serviceMonitor.honorLabels }} {{- with .Values.metrics.prometheus.serviceMonitor.honorLabels }}
honorLabels: {{ . }} honorLabels: {{ . }}
@ -62,7 +62,7 @@ spec:
{{- end }} {{- end }}
selector: selector:
matchLabels: matchLabels:
{{- if (and (.Values.metrics.prometheus.service).enabled (not .Values.hub.enabled)) }} {{- if (.Values.metrics.prometheus.service).enabled }}
{{- include "traefik.metricslabelselector" . | nindent 6 }} {{- include "traefik.metricslabelselector" . | nindent 6 }}
{{- else }} {{- else }}
{{- include "traefik.labelselector" . | nindent 6 }} {{- include "traefik.labelselector" . | nindent 6 }}

View File

@ -1,70 +1,56 @@
# Default values for Traefik # Default values for Traefik
image: image:
# -- Traefik image host registry
registry: docker.io registry: docker.io
# -- Traefik image repository
repository: traefik repository: traefik
# defaults to appVersion # -- defaults to appVersion
tag: "" tag: ""
# -- Traefik image pull policy
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
# # -- Add additional labels to all resources
# Configure integration with Traefik Hub commonLabels: {}
#
hub:
## Enabling Hub will:
# * enable Traefik Hub integration on Traefik
# * add `traefikhub-tunl` endpoint
# * enable Prometheus metrics with addRoutersLabels
# * enable allowExternalNameServices on KubernetesIngress provider
# * enable allowCrossNamespace on KubernetesCRD provider
# * add an internal (ClusterIP) Service, dedicated for Traefik Hub
enabled: false
## Default port can be changed
# tunnelPort: 9901
## TLS is optional. Insecure is mutually exclusive with any other options
# tls:
# insecure: false
# ca: "/path/to/ca.pem"
# cert: "/path/to/cert.pem"
# key: "/path/to/key.pem"
# #
# Configure the deployment # Configure the deployment
# #
deployment: deployment:
# -- Enable deployment
enabled: true enabled: true
# Can be either Deployment or DaemonSet # -- Deployment or DaemonSet
kind: Deployment kind: Deployment
# Number of pods of the deployment (only applies when kind == Deployment) # -- Number of pods of the deployment (only applies when kind == Deployment)
replicas: 1 replicas: 1
# Number of old revisions to retain to allow rollback (If not set, default Kubernetes value is set to 10) # -- Number of old revisions to retain to allow rollback (If not set, default Kubernetes value is set to 10)
# revisionHistoryLimit: 1 # revisionHistoryLimit: 1
# Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down # -- Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
terminationGracePeriodSeconds: 60 terminationGracePeriodSeconds: 60
# The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available # -- The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
minReadySeconds: 0 minReadySeconds: 0
# Additional deployment annotations (e.g. for jaeger-operator sidecar injection) # -- Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
annotations: {} annotations: {}
# Additional deployment labels (e.g. for filtering deployment by custom labels) # -- Additional deployment labels (e.g. for filtering deployment by custom labels)
labels: {} labels: {}
# Additional pod annotations (e.g. for mesh injection or prometheus scraping) # -- Additional pod annotations (e.g. for mesh injection or prometheus scraping)
podAnnotations: {} podAnnotations: {}
# Additional Pod labels (e.g. for filtering Pod by custom labels) # -- Additional Pod labels (e.g. for filtering Pod by custom labels)
podLabels: {} podLabels: {}
# Additional containers (e.g. for metric offloading sidecars) # -- Additional containers (e.g. for metric offloading sidecars)
additionalContainers: [] additionalContainers: []
# https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host # https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host
# - name: socat-proxy # - name: socat-proxy
# image: alpine/socat:1.0.5 # image: alpine/socat:1.0.5
# args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"] # args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
# volumeMounts: # volumeMounts:
# - name: dsdsocket # - name: dsdsocket
# mountPath: /socket # mountPath: /socket
# Additional volumes available for use with initContainers and additionalContainers # -- Additional volumes available for use with initContainers and additionalContainers
additionalVolumes: [] additionalVolumes: []
# - name: dsdsocket # - name: dsdsocket
# hostPath: # hostPath:
# path: /var/run/statsd-exporter # path: /var/run/statsd-exporter
# Additional initContainers (e.g. for setting file permission as shown below) # -- Additional initContainers (e.g. for setting file permission as shown below)
initContainers: [] initContainers: []
# The "volume-permissions" init container is required if you run into permission issues. # The "volume-permissions" init container is required if you run into permission issues.
# Related issue: https://github.com/traefik/traefik-helm-chart/issues/396 # Related issue: https://github.com/traefik/traefik-helm-chart/issues/396
@ -78,9 +64,9 @@ deployment:
# volumeMounts: # volumeMounts:
# - name: data # - name: data
# mountPath: /data # mountPath: /data
# Use process namespace sharing # -- Use process namespace sharing
shareProcessNamespace: false shareProcessNamespace: false
# Custom pod DNS policy. Apply if `hostNetwork: true` # -- Custom pod DNS policy. Apply if `hostNetwork: true`
# dnsPolicy: ClusterFirstWithHostNet # dnsPolicy: ClusterFirstWithHostNet
dnsConfig: {} dnsConfig: {}
# nameservers: # nameservers:
@ -92,10 +78,10 @@ deployment:
# - name: ndots # - name: ndots
# value: "2" # value: "2"
# - name: edns0 # - name: edns0
# Additional imagePullSecrets # -- Additional imagePullSecrets
imagePullSecrets: [] imagePullSecrets: []
# - name: myRegistryKeySecretName # - name: myRegistryKeySecretName
# Pod lifecycle actions # -- Pod lifecycle actions
lifecycle: {} lifecycle: {}
# preStop: # preStop:
# exec: # exec:
@ -107,7 +93,7 @@ deployment:
# host: localhost # host: localhost
# scheme: HTTP # scheme: HTTP
# Pod disruption budget # -- Pod disruption budget
podDisruptionBudget: podDisruptionBudget:
enabled: false enabled: false
# maxUnavailable: 1 # maxUnavailable: 1
@ -115,93 +101,112 @@ podDisruptionBudget:
# minAvailable: 0 # minAvailable: 0
# minAvailable: 25% # minAvailable: 25%
# Create a default IngressClass for Traefik # -- Create a default IngressClass for Traefik
ingressClass: ingressClass:
enabled: true enabled: true
isDefaultClass: true isDefaultClass: true
# Enable experimental features # Traefik experimental features
experimental: experimental:
v3: v3:
# -- Enable traefik version 3
enabled: false enabled: false
plugins: plugins:
# -- Enable traefik experimental plugins
enabled: false enabled: false
kubernetesGateway: kubernetesGateway:
# -- Enable traefik experimental GatewayClass CRD
enabled: false enabled: false
gateway: gateway:
# -- Enable traefik regular kubernetes gateway
enabled: true enabled: true
# certificate: # certificate:
# group: "core" # group: "core"
# kind: "Secret" # kind: "Secret"
# name: "mysecret" # name: "mysecret"
# By default, the Gateway is created in the namespace you are deploying Traefik to. # -- By default, the Gateway is created in the namespace you are deploying Traefik to.
# You may create that Gateway in another namespace by setting its name below: # You may create that Gateway in another namespace by setting its name below:
# namespace: default # namespace: default
# Additional gateway annotations (e.g. for cert-manager.io/issuer) # Additional gateway annotations (e.g. for cert-manager.io/issuer)
# annotations: # annotations:
# cert-manager.io/issuer: letsencrypt # cert-manager.io/issuer: letsencrypt
# Create an IngressRoute for the dashboard ## Create an IngressRoute for the dashboard
ingressRoute: ingressRoute:
dashboard: dashboard:
# -- Create an IngressRoute for the dashboard
enabled: true enabled: true
# Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) # -- Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
annotations: {} annotations: {}
# Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) # -- Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
labels: {} labels: {}
# The router match rule used for the dashboard ingressRoute # -- The router match rule used for the dashboard ingressRoute
matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`) matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`)
# Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure). # -- Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure).
# By default, it's using traefik entrypoint, which is not exposed. # By default, it's using traefik entrypoint, which is not exposed.
# /!\ Do not expose your dashboard without any protection over the internet /!\ # /!\ Do not expose your dashboard without any protection over the internet /!\
entryPoints: ["traefik"] entryPoints: ["traefik"]
# Additional ingressRoute middlewares (e.g. for authentication) # -- Additional ingressRoute middlewares (e.g. for authentication)
middlewares: [] middlewares: []
# TLS options (e.g. secret containing certificate) # -- TLS options (e.g. secret containing certificate)
tls: {} tls: {}
# Customize updateStrategy of traefik pods
updateStrategy: updateStrategy:
# -- Customize updateStrategy: RollingUpdate or OnDelete
type: RollingUpdate type: RollingUpdate
rollingUpdate: rollingUpdate:
maxUnavailable: 0 maxUnavailable: 0
maxSurge: 1 maxSurge: 1
# Customize liveness and readiness probe values.
readinessProbe: readinessProbe:
# -- The number of consecutive failures allowed before considering the probe as failed.
failureThreshold: 1 failureThreshold: 1
# -- The number of seconds to wait before starting the first probe.
initialDelaySeconds: 2 initialDelaySeconds: 2
# -- The number of seconds to wait between consecutive probes.
periodSeconds: 10 periodSeconds: 10
# -- The minimum consecutive successes required to consider the probe successful.
successThreshold: 1 successThreshold: 1
# -- The number of seconds to wait for a probe response before considering it as failed.
timeoutSeconds: 2 timeoutSeconds: 2
livenessProbe: livenessProbe:
# -- The number of consecutive failures allowed before considering the probe as failed.
failureThreshold: 3 failureThreshold: 3
# -- The number of seconds to wait before starting the first probe.
initialDelaySeconds: 2 initialDelaySeconds: 2
# -- The number of seconds to wait between consecutive probes.
periodSeconds: 10 periodSeconds: 10
# -- The minimum consecutive successes required to consider the probe successful.
successThreshold: 1 successThreshold: 1
# -- The number of seconds to wait for a probe response before considering it as failed.
timeoutSeconds: 2 timeoutSeconds: 2
#
# Configure providers
#
providers: providers:
kubernetesCRD: kubernetesCRD:
# -- Load Kubernetes IngressRoute provider
enabled: true enabled: true
# -- Allows IngressRoute to reference resources in namespaces other than their own
allowCrossNamespace: false allowCrossNamespace: false
# -- Allows referencing ExternalName services in IngressRoute
allowExternalNameServices: false allowExternalNameServices: false
# -- Allows returning 503 when there are no endpoints available
allowEmptyServices: false allowEmptyServices: false
# ingressClass: traefik-internal # ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik # labelSelector: environment=production,method=traefik
# -- Array of namespaces to watch. If left empty, Traefik watches all namespaces.
namespaces: [] namespaces: []
# - "default" # - "default"
kubernetesIngress: kubernetesIngress:
# -- Load Kubernetes Ingress provider
enabled: true enabled: true
# -- Allows referencing ExternalName services in Ingress
allowExternalNameServices: false allowExternalNameServices: false
# -- Allows returning 503 when there are no endpoints available
allowEmptyServices: false allowEmptyServices: false
# ingressClass: traefik-internal # ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik # labelSelector: environment=production,method=traefik
# -- Array of namespaces to watch. If left empty, Traefik watches all namespaces.
namespaces: [] namespaces: []
# - "default" # - "default"
# IP used for Kubernetes Ingress endpoints # IP used for Kubernetes Ingress endpoints
@ -212,13 +217,13 @@ providers:
# pathOverride: "" # pathOverride: ""
# #
# Add volumes to the traefik pod. The volume name will be passed to tpl. # -- Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file. # This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg: # After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments: # `additionalArguments:
# - "--providers.file.filename=/config/dynamic.toml" # - "--providers.file.filename=/config/dynamic.toml"
# - "--ping" # - "--ping"
# - "--ping.entrypoint=web" # - "--ping.entrypoint=web"`
volumes: [] volumes: []
# - name: public-cert # - name: public-cert
# mountPath: "/certs" # mountPath: "/certs"
@ -227,25 +232,22 @@ volumes: []
# mountPath: "/config" # mountPath: "/config"
# type: configMap # type: configMap
# Additional volumeMounts to add to the Traefik container # -- Additional volumeMounts to add to the Traefik container
additionalVolumeMounts: [] additionalVolumeMounts: []
# For instance when using a logshipper for access logs # -- For instance when using a logshipper for access logs
# - name: traefik-logs # - name: traefik-logs
# mountPath: /var/log/traefik # mountPath: /var/log/traefik
## Logs
## https://docs.traefik.io/observability/logs/
logs: logs:
## Traefik logs concern everything that happens to Traefik itself (startup, configuration, events, shutdown, and so on).
general: general:
# By default, the logs use a text format (common), but you can # -- By default, the logs use a text format (common), but you can
# also ask for the json format in the format option # also ask for the json format in the format option
# format: json # format: json
# By default, the level is set to ERROR. # By default, the level is set to ERROR.
# Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO. # -- Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO.
level: ERROR level: ERROR
access: access:
# To enable access logs # -- To enable access logs
enabled: false enabled: false
## By default, logs are written using the Common Log Format (CLF) on stdout. ## By default, logs are written using the Common Log Format (CLF) on stdout.
## To write logs in JSON, use json in the format option. ## To write logs in JSON, use json in the format option.
@ -256,21 +258,24 @@ logs:
## This option represents the number of log lines Traefik will keep in memory before writing ## This option represents the number of log lines Traefik will keep in memory before writing
## them to the selected output. In some cases, this option can greatly help performances. ## them to the selected output. In some cases, this option can greatly help performances.
# bufferingSize: 100 # bufferingSize: 100
## Filtering https://docs.traefik.io/observability/access-logs/#filtering ## Filtering
# -- https://docs.traefik.io/observability/access-logs/#filtering
filters: {} filters: {}
# statuscodes: "200,300-302" # statuscodes: "200,300-302"
# retryattempts: true # retryattempts: true
# minduration: 10ms # minduration: 10ms
## Fields
## https://docs.traefik.io/observability/access-logs/#limiting-the-fieldsincluding-headers
fields: fields:
general: general:
# -- Available modes: keep, drop, redact.
defaultmode: keep defaultmode: keep
# -- Names of the fields to limit.
names: {} names: {}
## Examples: ## Examples:
# ClientUsername: drop # ClientUsername: drop
headers: headers:
# -- Available modes: keep, drop, redact.
defaultmode: drop defaultmode: drop
# -- Names of the headers to limit.
names: {} names: {}
## Examples: ## Examples:
# User-Agent: redact # User-Agent: redact
@ -278,10 +283,10 @@ logs:
# Content-Type: keep # Content-Type: keep
metrics: metrics:
## Prometheus is enabled by default. ## -- Prometheus is enabled by default.
## It can be disabled by setting "prometheus: null" ## -- It can be disabled by setting "prometheus: null"
prometheus: prometheus:
## Entry point used to expose metrics. # -- Entry point used to expose metrics.
entryPoint: metrics entryPoint: metrics
## Enable metrics on entry points. Default=true ## Enable metrics on entry points. Default=true
# addEntryPointsLabels: false # addEntryPointsLabels: false
@ -404,11 +409,9 @@ metrics:
# ## This instructs the reporter to send metrics to the OpenTelemetry Collector using gRPC. # ## This instructs the reporter to send metrics to the OpenTelemetry Collector using gRPC.
# grpc: true # grpc: true
## ## -- enable optional CRDs for Prometheus Operator
## enable optional CRDs for Prometheus Operator
## ##
## Create a dedicated metrics service for use with ServiceMonitor ## Create a dedicated metrics service for use with ServiceMonitor
## When hub.enabled is set to true, it's not needed: it will use hub service.
# service: # service:
# enabled: false # enabled: false
# labels: {} # labels: {}
@ -455,6 +458,8 @@ metrics:
# summary: "Traefik Down" # summary: "Traefik Down"
# description: "{{ $labels.pod }} on {{ $labels.nodename }} is down" # description: "{{ $labels.pod }} on {{ $labels.nodename }} is down"
## Tracing
# -- https://doc.traefik.io/traefik/observability/tracing/overview/
tracing: {} tracing: {}
# instana: # instana:
# localAgentHost: 127.0.0.1 # localAgentHost: 127.0.0.1
@ -497,20 +502,21 @@ tracing: {}
# secretToken: "" # secretToken: ""
# serviceEnvironment: "" # serviceEnvironment: ""
# -- Global command arguments to be passed to all traefik's pods
globalArguments: globalArguments:
- "--global.checknewversion" - "--global.checknewversion"
- "--global.sendanonymoususage" - "--global.sendanonymoususage"
# #
# Configure Traefik static configuration # Configure Traefik static configuration
# Additional arguments to be passed at Traefik's binary # -- Additional arguments to be passed at Traefik's binary
# All available options available on https://docs.traefik.io/reference/static-configuration/cli/ # All available options available on https://docs.traefik.io/reference/static-configuration/cli/
## Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"` ## Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"`
additionalArguments: [] additionalArguments: []
# - "--providers.kubernetesingress.ingressclass=traefik-internal" # - "--providers.kubernetesingress.ingressclass=traefik-internal"
# - "--log.level=DEBUG" # - "--log.level=DEBUG"
# Environment variables to be passed to Traefik's binary # -- Environment variables to be passed to Traefik's binary
env: [] env: []
# - name: SOME_VAR # - name: SOME_VAR
# value: some-var-value # value: some-var-value
@ -525,22 +531,20 @@ env: []
# name: secret-name # name: secret-name
# key: secret-key # key: secret-key
# -- Environment variables to be passed to Traefik's binary from configMaps or secrets
envFrom: [] envFrom: []
# - configMapRef: # - configMapRef:
# name: config-map-name # name: config-map-name
# - secretRef: # - secretRef:
# name: secret-name # name: secret-name
# Configure ports
ports: ports:
# The name of this one can't be changed as it is used for the readiness and
# liveness probes, but you can adjust its config to your liking
traefik: traefik:
port: 9000 port: 9000
# Use hostPort if set. # -- Use hostPort if set.
# hostPort: 9000 # hostPort: 9000
# #
# Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which # -- Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which
# means it's listening on all your interfaces and all your IPs. You may want # means it's listening on all your interfaces and all your IPs. You may want
# to set this value if you need traefik to listen on specific interface # to set this value if you need traefik to listen on specific interface
# only. # only.
@ -558,27 +562,27 @@ ports:
# Defines whether the port is exposed if service.type is LoadBalancer or # Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort. # NodePort.
# #
# You SHOULD NOT expose the traefik port on production deployments. # -- You SHOULD NOT expose the traefik port on production deployments.
# If you want to access it from outside of your cluster, # If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress # use `kubectl port-forward` or create a secure ingress
expose: false expose: false
# The exposed port for this service # -- The exposed port for this service
exposedPort: 9000 exposedPort: 9000
# The port protocol (TCP/UDP) # -- The port protocol (TCP/UDP)
protocol: TCP protocol: TCP
web: web:
## Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint. ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint.
# asDefault: true # asDefault: true
port: 8000 port: 8000
# hostPort: 8000 # hostPort: 8000
# containerPort: 8000 # containerPort: 8000
expose: true expose: true
exposedPort: 80 exposedPort: 80
## Different target traefik port on the cluster, useful for IP type LB ## -- Different target traefik port on the cluster, useful for IP type LB
# targetPort: 80 # targetPort: 80
# The port protocol (TCP/UDP) # The port protocol (TCP/UDP)
protocol: TCP protocol: TCP
# Use nodeport if set. This is useful if you have configured Traefik in a # -- Use nodeport if set. This is useful if you have configured Traefik in a
# LoadBalancer. # LoadBalancer.
# nodePort: 32080 # nodePort: 32080
# Port Redirections # Port Redirections
@ -596,20 +600,22 @@ ports:
# trustedIPs: [] # trustedIPs: []
# insecure: false # insecure: false
websecure: websecure:
## Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint. ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint.
# asDefault: true # asDefault: true
port: 8443 port: 8443
# hostPort: 8443 # hostPort: 8443
# containerPort: 8443 # containerPort: 8443
expose: true expose: true
exposedPort: 443 exposedPort: 443
## Different target traefik port on the cluster, useful for IP type LB ## -- Different target traefik port on the cluster, useful for IP type LB
# targetPort: 80 # targetPort: 80
## The port protocol (TCP/UDP) ## -- The port protocol (TCP/UDP)
protocol: TCP protocol: TCP
# nodePort: 32443 # nodePort: 32443
## -- Specify an application protocol. This may be used as a hint for a Layer 7 load balancer.
# appProtocol: https
# #
## Enable HTTP/3 on the entrypoint ## -- Enable HTTP/3 on the entrypoint
## Enabling it will also enable http3 experimental feature ## Enabling it will also enable http3 experimental feature
## https://doc.traefik.io/traefik/routing/entrypoints/#http3 ## https://doc.traefik.io/traefik/routing/entrypoints/#http3
## There are known limitations when trying to listen on same ports for ## There are known limitations when trying to listen on same ports for
@ -619,12 +625,12 @@ ports:
enabled: false enabled: false
# advertisedPort: 4443 # advertisedPort: 4443
# #
## Trust forwarded headers information (X-Forwarded-*). ## -- Trust forwarded headers information (X-Forwarded-*).
#forwardedHeaders: #forwardedHeaders:
# trustedIPs: [] # trustedIPs: []
# insecure: false # insecure: false
# #
## Enable the Proxy Protocol header parsing for the entry point ## -- Enable the Proxy Protocol header parsing for the entry point
#proxyProtocol: #proxyProtocol:
# trustedIPs: [] # trustedIPs: []
# insecure: false # insecure: false
@ -642,33 +648,33 @@ ports:
# - foo.example.com # - foo.example.com
# - bar.example.com # - bar.example.com
# #
# One can apply Middlewares on an entrypoint # -- One can apply Middlewares on an entrypoint
# https://doc.traefik.io/traefik/middlewares/overview/ # https://doc.traefik.io/traefik/middlewares/overview/
# https://doc.traefik.io/traefik/routing/entrypoints/#middlewares # https://doc.traefik.io/traefik/routing/entrypoints/#middlewares
# /!\ This introduces a link between your static configuration and your dynamic configuration /!\ # -- /!\ This introduces a link between your static configuration and your dynamic configuration /!\
# It follows the provider naming convention: https://doc.traefik.io/traefik/providers/overview/#provider-namespace # It follows the provider naming convention: https://doc.traefik.io/traefik/providers/overview/#provider-namespace
# middlewares: # middlewares:
# - namespace-name1@kubernetescrd # - namespace-name1@kubernetescrd
# - namespace-name2@kubernetescrd # - namespace-name2@kubernetescrd
middlewares: [] middlewares: []
metrics: metrics:
# When using hostNetwork, use another port to avoid conflict with node exporter: # -- When using hostNetwork, use another port to avoid conflict with node exporter:
# https://github.com/prometheus/prometheus/wiki/Default-port-allocations # https://github.com/prometheus/prometheus/wiki/Default-port-allocations
port: 9100 port: 9100
# hostPort: 9100 # hostPort: 9100
# Defines whether the port is exposed if service.type is LoadBalancer or # Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort. # NodePort.
# #
# You may not want to expose the metrics port on production deployments. # -- You may not want to expose the metrics port on production deployments.
# If you want to access it from outside of your cluster, # If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress # use `kubectl port-forward` or create a secure ingress
expose: false expose: false
# The exposed port for this service # -- The exposed port for this service
exposedPort: 9100 exposedPort: 9100
# The port protocol (TCP/UDP) # -- The port protocol (TCP/UDP)
protocol: TCP protocol: TCP
# TLS Options are created as TLSOption CRDs # -- TLS Options are created as TLSOption CRDs
# https://doc.traefik.io/traefik/https/tls/#tls-options # https://doc.traefik.io/traefik/https/tls/#tls-options
# When using `labelSelector`, you'll need to set labels on tlsOption accordingly. # When using `labelSelector`, you'll need to set labels on tlsOption accordingly.
# Example: # Example:
@ -684,7 +690,7 @@ ports:
# - CurveP384 # - CurveP384
tlsOptions: {} tlsOptions: {}
# TLS Store are created as TLSStore CRDs. This is useful if you want to set a default certificate # -- TLS Store are created as TLSStore CRDs. This is useful if you want to set a default certificate
# https://doc.traefik.io/traefik/https/tls/#default-certificate # https://doc.traefik.io/traefik/https/tls/#default-certificate
# Example: # Example:
# tlsStore: # tlsStore:
@ -693,24 +699,22 @@ tlsOptions: {}
# secretName: tls-cert # secretName: tls-cert
tlsStore: {} tlsStore: {}
# Options for the main traefik service, where the entrypoints traffic comes
# from.
service: service:
enabled: true enabled: true
## A single Service uses the `MixedProtocolLBService` feature gate. ## -- A single Service uses the `MixedProtocolLBService` feature gate.
## When set to false, two Services are created, one for TCP and one for UDP. ## -- When set to false, two Services are created, one for TCP and one for UDP.
single: true single: true
type: LoadBalancer type: LoadBalancer
# Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config) # -- Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config)
annotations: {} annotations: {}
# Additional annotations for TCP service only # -- Additional annotations for TCP service only
annotationsTCP: {} annotationsTCP: {}
# Additional annotations for UDP service only # -- Additional annotations for UDP service only
annotationsUDP: {} annotationsUDP: {}
# Additional service labels (e.g. for filtering Service by custom labels) # -- Additional service labels (e.g. for filtering Service by custom labels)
labels: {} labels: {}
# Additional entries here will be added to the service spec. # -- Additional entries here will be added to the service spec.
# Cannot contain type, selector or ports entries. # -- Cannot contain type, selector or ports entries.
spec: {} spec: {}
# externalTrafficPolicy: Cluster # externalTrafficPolicy: Cluster
# loadBalancerIP: "1.2.3.4" # loadBalancerIP: "1.2.3.4"
@ -718,6 +722,8 @@ service:
loadBalancerSourceRanges: [] loadBalancerSourceRanges: []
# - 192.168.0.1/32 # - 192.168.0.1/32
# - 172.16.0.0/16 # - 172.16.0.0/16
## -- Class of the load balancer implementation
# loadBalancerClass: service.k8s.aws/nlb
externalIPs: [] externalIPs: []
# - 1.2.3.4 # - 1.2.3.4
## One of SingleStack, PreferDualStack, or RequireDualStack. ## One of SingleStack, PreferDualStack, or RequireDualStack.
@ -728,7 +734,7 @@ service:
# - IPv4 # - IPv4
# - IPv6 # - IPv6
## ##
## An additional and optional internal Service. ## -- An additional and optional internal Service.
## Same parameters as external Service ## Same parameters as external Service
# internal: # internal:
# type: ClusterIP # type: ClusterIP
@ -739,9 +745,8 @@ service:
# # externalIPs: [] # # externalIPs: []
# # ipFamilies: [ "IPv4","IPv6" ] # # ipFamilies: [ "IPv4","IPv6" ]
## Create HorizontalPodAutoscaler object.
##
autoscaling: autoscaling:
# -- Create HorizontalPodAutoscaler object.
enabled: false enabled: false
# minReplicas: 1 # minReplicas: 1
# maxReplicas: 10 # maxReplicas: 10
@ -766,10 +771,10 @@ autoscaling:
# value: 1 # value: 1
# periodSeconds: 60 # periodSeconds: 60
# Enable persistence using Persistent Volume Claims
# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
# It can be used to store TLS certificates, see `storage` in certResolvers
persistence: persistence:
# -- Enable persistence using Persistent Volume Claims
# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
# It can be used to store TLS certificates, see `storage` in certResolvers
enabled: false enabled: false
name: data name: data
# existingClaim: "" # existingClaim: ""
@ -779,8 +784,10 @@ persistence:
# volumeName: "" # volumeName: ""
path: /data path: /data
annotations: {} annotations: {}
# subPath: "" # only mount a subpath of the Volume into the pod # -- Only mount a subpath of the Volume into the pod
# subPath: ""
# -- Certificates resolvers configuration
certResolvers: {} certResolvers: {}
# letsencrypt: # letsencrypt:
# # for challenge options cf. https://doc.traefik.io/traefik/https/acme/ # # for challenge options cf. https://doc.traefik.io/traefik/https/acme/
@ -802,13 +809,13 @@ certResolvers: {}
# # It has to match the path with a persistent volume # # It has to match the path with a persistent volume
# storage: /data/acme.json # storage: /data/acme.json
# If hostNetwork is true, runs traefik in the host network namespace # -- If hostNetwork is true, runs traefik in the host network namespace
# To prevent unschedulable pods due to port collisions, if hostNetwork=true # To prevent unschedulable pods due to port collisions, if hostNetwork=true
# and replicas>1, a pod anti-affinity is recommended and will be set if the # and replicas>1, a pod anti-affinity is recommended and will be set if the
# affinity is left as default. # affinity is left as default.
hostNetwork: false hostNetwork: false
# Whether Role Based Access Control objects like roles and rolebindings should be created # -- Whether Role Based Access Control objects like roles and rolebindings should be created
rbac: rbac:
enabled: true enabled: true
# If set to false, installs ClusterRole and ClusterRoleBinding so Traefik can be used across namespaces. # If set to false, installs ClusterRole and ClusterRoleBinding so Traefik can be used across namespaces.
@ -818,19 +825,20 @@ rbac:
# https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
# aggregateTo: [ "admin" ] # aggregateTo: [ "admin" ]
# Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding # -- Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding
podSecurityPolicy: podSecurityPolicy:
enabled: false enabled: false
# The service account the pods will use to interact with the Kubernetes API # -- The service account the pods will use to interact with the Kubernetes API
serviceAccount: serviceAccount:
# If set, an existing service account is used # If set, an existing service account is used
# If not set, a service account is created automatically using the fullname template # If not set, a service account is created automatically using the fullname template
name: "" name: ""
# Additional serviceAccount annotations (e.g. for oidc authentication) # -- Additional serviceAccount annotations (e.g. for oidc authentication)
serviceAccountAnnotations: {} serviceAccountAnnotations: {}
# -- The resources parameter defines CPU and memory requirements and limits for Traefik's containers.
resources: {} resources: {}
# requests: # requests:
# cpu: "100m" # cpu: "100m"
@ -839,8 +847,8 @@ resources: {}
# cpu: "300m" # cpu: "300m"
# memory: "150Mi" # memory: "150Mi"
# This example pod anti-affinity forces the scheduler to put traefik pods # -- This example pod anti-affinity forces the scheduler to put traefik pods
# on nodes where no other traefik pods are scheduled. # -- on nodes where no other traefik pods are scheduled.
# It should be used when hostNetwork: true to prevent port conflicts # It should be used when hostNetwork: true to prevent port conflicts
affinity: {} affinity: {}
# podAntiAffinity: # podAntiAffinity:
@ -851,11 +859,15 @@ affinity: {}
# app.kubernetes.io/instance: '{{ .Release.Name }}-{{ .Release.Namespace }}' # app.kubernetes.io/instance: '{{ .Release.Name }}-{{ .Release.Namespace }}'
# topologyKey: kubernetes.io/hostname # topologyKey: kubernetes.io/hostname
# -- nodeSelector is the simplest recommended form of node selection constraint.
nodeSelector: {} nodeSelector: {}
# -- Tolerations allow the scheduler to schedule pods with matching taints.
tolerations: [] tolerations: []
# -- You can use topology spread constraints to control
# how Pods are spread across your cluster among failure-domains.
topologySpreadConstraints: [] topologySpreadConstraints: []
# # This example topologySpreadConstraints forces the scheduler to put traefik pods # This example topologySpreadConstraints forces the scheduler to put traefik pods
# # on nodes where no other traefik pods are scheduled. # on nodes where no other traefik pods are scheduled.
# - labelSelector: # - labelSelector:
# matchLabels: # matchLabels:
# app: '{{ template "traefik.name" . }}' # app: '{{ template "traefik.name" . }}'
@ -863,29 +875,33 @@ topologySpreadConstraints: []
# topologyKey: kubernetes.io/hostname # topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule # whenUnsatisfiable: DoNotSchedule
# Pods can have priority. # -- Pods can have priority.
# Priority indicates the importance of a Pod relative to other Pods. # -- Priority indicates the importance of a Pod relative to other Pods.
priorityClassName: "" priorityClassName: ""
# Set the container security context # -- Set the container security context
# To run the container with ports below 1024 this will need to be adjusted to run as root # -- To run the container with ports below 1024 this will need to be adjusted to run as root
securityContext: securityContext:
capabilities: capabilities:
drop: [ALL] drop: [ALL]
readOnlyRootFilesystem: true readOnlyRootFilesystem: true
podSecurityContext: podSecurityContext:
# # /!\ When setting fsGroup, Kubernetes will recursively change ownership and # /!\ When setting fsGroup, Kubernetes will recursively change ownership and
# # permissions for the contents of each volume to match the fsGroup. This can # permissions for the contents of each volume to match the fsGroup. This can
# # be an issue when storing sensitive content like TLS Certificates /!\ # be an issue when storing sensitive content like TLS Certificates /!\
# fsGroup: 65532 # fsGroup: 65532
# -- Specifies the policy for changing ownership and permissions of volume contents to match the fsGroup.
fsGroupChangePolicy: "OnRootMismatch" fsGroupChangePolicy: "OnRootMismatch"
# -- The ID of the group for all containers in the pod to run as.
runAsGroup: 65532 runAsGroup: 65532
# -- Specifies whether the containers should run as a non-root user.
runAsNonRoot: true runAsNonRoot: true
# -- The ID of the user for all containers in the pod to run as.
runAsUser: 65532 runAsUser: 65532
# #
# Extra objects to deploy (value evaluated as a template) # -- Extra objects to deploy (value evaluated as a template)
# #
# In some cases, it can avoid the need for additional, extended or adhoc deployments. # In some cases, it can avoid the need for additional, extended or adhoc deployments.
# See #595 for more details and traefik/tests/values/extra.yaml for example. # See #595 for more details and traefik/tests/values/extra.yaml for example.
@ -895,5 +911,5 @@ extraObjects: []
# It will not affect optional CRDs such as `ServiceMonitor` and `PrometheusRules` # It will not affect optional CRDs such as `ServiceMonitor` and `PrometheusRules`
# namespaceOverride: traefik # namespaceOverride: traefik
# #
## This will override the default app.kubernetes.io/instance label for all Objects. ## -- This will override the default app.kubernetes.io/instance label for all Objects.
# instanceLabelOverride: traefik # instanceLabelOverride: traefik

View File

@ -1182,6 +1182,47 @@ entries:
- assets/ambassador/ambassador-6.7.1100.tgz - assets/ambassador/ambassador-6.7.1100.tgz
version: 6.7.1100 version: 6.7.1100
argo-cd: argo-cd:
- annotations:
artifacthub.io/changes: |
- kind: changed
description: Upgrade Argo CD to v2.7.4
- kind: added
description: Update knownHosts
artifacthub.io/signKey: |
fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252
url: https://argoproj.github.io/argo-helm/pgp_keys.asc
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Argo CD
catalog.cattle.io/kube-version: '>=1.23.0-0'
catalog.cattle.io/release-name: argo-cd
apiVersion: v2
appVersion: v2.7.4
created: "2023-06-06T17:23:39.306413805Z"
dependencies:
- condition: redis-ha.enabled
name: redis-ha
repository: file://./charts/redis-ha
version: 4.23.0
description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery
tool for Kubernetes.
digest: 0515d36b38ceceae9624aaafdc249afc08c17aed7eab1262d8d770abfb45104a
home: https://github.com/argoproj/argo-helm
icon: https://argo-cd.readthedocs.io/en/stable/assets/logo.png
keywords:
- argoproj
- argocd
- gitops
kubeVersion: '>=1.23.0-0'
maintainers:
- name: argoproj
url: https://argoproj.github.io/
name: argo-cd
sources:
- https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd
- https://github.com/argoproj/argo-cd
urls:
- assets/argo/argo-cd-5.35.1.tgz
version: 5.35.1
- annotations: - annotations:
artifacthub.io/changes: | artifacthub.io/changes: |
- kind: changed - kind: changed
@ -4839,6 +4880,71 @@ entries:
- assets/jfrog/artifactory-jcr-2.5.100.tgz - assets/jfrog/artifactory-jcr-2.5.100.tgz
version: 2.5.100 version: 2.5.100
asserts: asserts:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Asserts
catalog.cattle.io/kube-version: '>=1.17-0'
catalog.cattle.io/release-name: asserts
apiVersion: v2
created: "2023-06-06T17:23:39.769259412Z"
dependencies:
- condition: knowledge-sensor.enabled
name: knowledge-sensor
repository: file://./charts/knowledge-sensor
version: 1.1.0
- alias: tsdb
condition: tsdb.enabled
name: victoria-metrics-single
repository: file://./charts/victoria-metrics-single
version: 1.1.0
- condition: alertmanager.enabled
name: alertmanager
repository: file://./charts/alertmanager
version: 1.0.0
- alias: promxyruler
condition: promxyruler.enabled
name: promxy
repository: file://./charts/promxy
version: 0.8.0
- alias: promxyuser
condition: promxyuser.enabled
name: promxy
repository: file://./charts/promxy
version: 0.8.0
- alias: ebpfProbe
condition: ebpfProbe.enabled
name: ebpf-probe
repository: file://./charts/ebpf-probe
version: 0.7.0
- name: common
repository: file://./charts/common
version: 1.x.x
- alias: redisgraph
condition: redisgraph.enabled
name: redis
repository: file://./charts/redis
version: 16.13.2
- alias: redisearch
condition: redisearch.enabled
name: redis
repository: file://./charts/redis
version: 16.13.2
- alias: postgres
condition: postgres.enabled
name: postgresql
repository: file://./charts/postgresql
version: 11.9.13
description: Asserts Helm Chart to configure entire asserts stack
digest: a75f8faafda5b576a711460881463d643cb8afbeca28eb6aa95e255c589d373b
icon: https://www.asserts.ai/favicon.png
maintainers:
- name: Asserts
url: https://github.com/asserts
name: asserts
type: application
urls:
- assets/asserts/asserts-1.41.0.tgz
version: 1.41.0
- annotations: - annotations:
catalog.cattle.io/certified: partner catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Asserts catalog.cattle.io/display-name: Asserts
@ -10803,6 +10909,43 @@ entries:
- assets/weka/csi-wekafsplugin-0.6.400.tgz - assets/weka/csi-wekafsplugin-0.6.400.tgz
version: 0.6.400 version: 0.6.400
datadog: datadog:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Datadog
catalog.cattle.io/kube-version: '>=1.10-0'
catalog.cattle.io/release-name: datadog
apiVersion: v1
appVersion: "7"
created: "2023-06-06T17:23:42.3048654Z"
dependencies:
- condition: clusterAgent.metricsProvider.useDatadogMetrics
name: datadog-crds
repository: https://helm.datadoghq.com
tags:
- install-crds
version: 0.4.7
- condition: datadog.kubeStateMetricsEnabled
name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 2.13.2
description: Datadog Agent
digest: e24e164a06cc5107f21af8822b2a149c00170e802a529dc93576ca09d1709a30
home: https://www.datadoghq.com
icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png
keywords:
- monitoring
- alerting
- metric
maintainers:
- email: support@datadoghq.com
name: Datadog
name: datadog
sources:
- https://app.datadoghq.com/account/settings#agent/kubernetes
- https://github.com/DataDog/datadog-agent
urls:
- assets/datadog/datadog-3.31.0.tgz
version: 3.31.0
- annotations: - annotations:
catalog.cattle.io/certified: partner catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Datadog catalog.cattle.io/display-name: Datadog
@ -14379,7 +14522,7 @@ entries:
type: application type: application
urls: urls:
- assets/inaccel/fpga-operator-2.5.201.tgz - assets/inaccel/fpga-operator-2.5.201.tgz
version: 2.5.201 version: 2.5.201
gluu: gluu:
- annotations: - annotations:
artifacthub.io/changes: | artifacthub.io/changes: |
@ -15752,6 +15895,37 @@ entries:
- assets/haproxy/haproxy-1.4.300.tgz - assets/haproxy/haproxy-1.4.300.tgz
version: 1.4.300 version: 1.4.300
harbor: harbor:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Harbor
catalog.cattle.io/kube-version: '>=1.20-0'
catalog.cattle.io/release-name: harbor
apiVersion: v1
appVersion: 2.8.2
created: "2023-06-06T17:23:42.753119764Z"
description: An open source trusted cloud native registry that stores, signs,
and scans content
digest: d7c464bbd6b7a5ec13e3c2e7efa73b9597c6e6b2ff77b63fb5f7312265fe8e37
home: https://goharbor.io
icon: https://raw.githubusercontent.com/goharbor/website/master/static/img/logos/harbor-icon-color.png
keywords:
- docker
- registry
- harbor
maintainers:
- email: yinw@vmware.com
name: Wenkai Yin
- email: hweiwei@vmware.com
name: Weiwei He
- email: yshengwen@vmware.com
name: Shengwen Yu
name: harbor
sources:
- https://github.com/goharbor/harbor
- https://github.com/goharbor/harbor-helm
urls:
- assets/harbor/harbor-1.12.2.tgz
version: 1.12.2
- annotations: - annotations:
catalog.cattle.io/certified: partner catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Harbor catalog.cattle.io/display-name: Harbor
@ -21742,6 +21916,52 @@ entries:
- assets/kubemq/kubemq-crds-2.3.7.tgz - assets/kubemq/kubemq-crds-2.3.7.tgz
version: 2.3.7 version: 2.3.7
kubeslice-controller: kubeslice-controller:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Avesha Kubeslice Controller
catalog.cattle.io/kube-version: '>= 1.19.0-0'
catalog.cattle.io/namespace: kubeslice-controller
catalog.cattle.io/release-name: kubeslice-controller
apiVersion: v2
appVersion: 1.0.0
created: "2023-06-06T17:23:39.846595665Z"
description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking
tool for efficient, secure, policy-enforced connectivity and true multi-tenancy
capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure
costs, cluster/namespace sprawl, avoid complex firewall and gateway configurations
and more.
digest: 1918a98ef6142b2a051e456a658aa507c5bff8c63ab8413b1dd86784d65c6e85
icon: https://kubeslice.io/documentation/open-source/img/kubeslice-logo.svg
keywords:
- multicloud
- multi cloud
- multitenant
- multitenancy
- multi tenant
- multi tenancy
- federated mesh
- federated clusters
- federated k8s
- federated kubernetes
- cluster sprawl
- sprawl
- namespace sprawl
- network policy
- overlay network
- mesh network
- security
- networking
- infrastructure
- application
kubeVersion: '>= 1.19.0-0'
maintainers:
- email: support@avesha.io
name: Avesha
name: kubeslice-controller
type: application
urls:
- assets/avesha/kubeslice-controller-1.0.0.tgz
version: 1.0.0
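# Illustrative sketch for kubeslice-controller 1.0.0; the target namespace comes
# from the catalog.cattle.io/namespace annotation, the "partner" alias is assumed:
#   helm install kubeslice-controller partner/kubeslice-controller --version 1.0.0 \
#     --namespace kubeslice-controller --create-namespace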
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Avesha Kubeslice Controller
@@ -21950,6 +22170,52 @@ entries:
- assets/avesha/kubeslice-controller-0.4.2.tgz
version: 0.4.2
kubeslice-worker:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Avesha Kubeslice Worker
catalog.cattle.io/kube-version: '>= 1.19.0-0'
catalog.cattle.io/namespace: kubeslice-system
catalog.cattle.io/release-name: kubeslice-worker
apiVersion: v2
appVersion: 1.0.0
created: "2023-06-06T17:23:39.859623891Z"
description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking
tool for efficient, secure, policy-enforced connectivity and true multi-tenancy
capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure
costs, cluster/namespace sprawl, avoid complex firewall and gateway configurations
and more.
digest: e1d8a38a78bd26520048eff178585aebef50f6b46fa0616679561fd05424b49a
icon: https://kubeslice.io/documentation/open-source/img/kubeslice-logo.svg
keywords:
- multicloud
- multi cloud
- multitenant
- multitenancy
- multi tenant
- multi tenancy
- federated mesh
- federated clusters
- federated k8s
- federated kubernetes
- cluster sprawl
- sprawl
- namespace sprawl
- network policy
- overlay network
- mesh network
- security
- networking
- infrastructure
- application
kubeVersion: '>= 1.19.0-0'
maintainers:
- email: support@avesha.io
name: Avesha
name: kubeslice-worker
type: application
urls:
- assets/avesha/kubeslice-worker-1.0.0.tgz
version: 1.0.0
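# Illustrative sketch for kubeslice-worker 1.0.0, mirroring the controller install;
# the namespace follows the catalog.cattle.io/namespace annotation, "partner" alias assumed:
#   helm install kubeslice-worker partner/kubeslice-worker --version 1.0.0 \
#     --namespace kubeslice-system --create-namespace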
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Avesha Kubeslice Worker
@@ -23977,6 +24243,43 @@ entries:
- assets/minio/minio-operator-4.4.1700.tgz
version: 4.4.1700
mysql:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: MySQL
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: mysql
category: Database
licenses: Apache-2.0
apiVersion: v2
appVersion: 8.0.33
created: "2023-06-06T17:23:40.645931747Z"
dependencies:
- name: common
repository: file://./charts/common
tags:
- bitnami-common
version: 2.x.x
description: MySQL is a fast, reliable, scalable, and easy to use open source
relational database system. Designed to handle mission-critical, heavy-load
production applications.
digest: 041f279b3d86eba3733332552bd86408447223886ae862c710fad827876d61ed
home: https://bitnami.com
icon: https://www.mysql.com/common/logos/logo-mysql-170x115.png
keywords:
- mysql
- database
- sql
- cluster
- high availability
maintainers:
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: mysql
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/mysql
urls:
- assets/bitnami/mysql-9.10.2.tgz
version: 9.10.2
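# Illustrative sketch for mysql 9.10.2 (Bitnami chart; "partner" alias assumed).
# The auth.rootPassword value shown is an example parameter, not a default from this index:
#   helm install mysql partner/mysql --version 9.10.2 \
#     --set auth.rootPassword=<your-root-password>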
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: MySQL
@@ -38078,6 +38381,50 @@ entries:
- assets/bitnami/tomcat-10.4.9.tgz
version: 10.4.9
traefik:
- annotations:
artifacthub.io/changes: "- \"release: \U0001F680 publish v23.1.0\"\n- \"feat:
✨ add a warning when labelSelector don't match\"\n- \"feat: add optional `appProtocol`
field on Service ports\"\n- \"feat: remove Traefik Hub v1 integration\"\n-
\"feat: allow specifying service loadBalancerClass\"\n- \"feat: common labels
for all resources\"\n- \"fix: \U0001F41B use k8s version for hpa api version\"\n-
\"fix: \U0001F41B http3 support on traefik v3\"\n- \"fix: use `targetPort`
instead of `port` on ServiceMonitor\"\n- \"doc: added values README via helm-docs
cli\"\n"
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Traefik Proxy
catalog.cattle.io/kube-version: '>=1.16.0-0'
catalog.cattle.io/release-name: traefik
apiVersion: v2
appVersion: v2.10.1
created: "2023-06-06T17:23:45.746792079Z"
description: A Traefik based Kubernetes ingress controller
digest: 510b78e49e821674f1e805fbcea686a9f4a783fa8f3102acea1680694076ac97
home: https://traefik.io/
icon: https://raw.githubusercontent.com/traefik/traefik/v2.3/docs/content/assets/img/traefik.logo.png
keywords:
- traefik
- ingress
- networking
kubeVersion: '>=1.16.0-0'
maintainers:
- email: emile@vauge.com
name: emilevauge
- email: daniel.tomcej@gmail.com
name: dtomcej
- email: ldez@traefik.io
name: ldez
- email: michel.loiseleur@traefik.io
name: mloiseleur
- email: charlie.haley@traefik.io
name: charlie-haley
name: traefik
sources:
- https://github.com/traefik/traefik
- https://github.com/traefik/traefik-helm-chart
type: application
urls:
- assets/traefik/traefik-23.1.0.tgz
version: 23.1.0
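# Illustrative sketch for traefik 23.1.0 ("partner" alias assumed; the chart
# declares kubeVersion '>=1.16.0-0'):
#   helm install traefik partner/traefik --version 23.1.0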
- annotations:
artifacthub.io/changes: |
- "⬆️ Upgrade traefik Docker tag to v2.10.1"
@@ -39326,6 +39673,27 @@ entries:
- assets/universal-crossplane/universal-crossplane-1.2.200100.tgz
version: 1.2.200100
vals-operator:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Vals-Operator
catalog.cattle.io/kube-version: '>= 1.19.0-0'
catalog.cattle.io/release-name: vals-operator
apiVersion: v2
appVersion: v0.7.3
created: "2023-06-06T17:23:42.435566072Z"
description: This helm chart installs the Digitalis Vals Operator to manage and
sync secrets from supported backends into Kubernetes.
digest: ef55fb174e741db7f191763a0075178251d2ee972db09d38266978f977d5b6ba
icon: https://digitalis.io/wp-content/uploads/2020/06/cropped-Digitalis-512x512-Blue_Digitalis-512x512-Blue-32x32.png
kubeVersion: '>= 1.19.0-0'
maintainers:
- email: info@digitalis.io
name: Digitalis.IO
name: vals-operator
type: application
urls:
- assets/digitalis/vals-operator-0.7.3.tgz
version: 0.7.3
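# Illustrative sketch for vals-operator 0.7.3 ("partner" alias assumed; requires
# Kubernetes >= 1.19 per the chart's kubeVersion constraint):
#   helm install vals-operator partner/vals-operator --version 0.7.3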
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Vals-Operator