Charts CI

```
Updated:
  bitnami/cassandra:
    - 10.5.4
  bitnami/postgresql:
    - 12.12.10
  bitnami/wordpress:
    - 17.1.12
  cert-manager/cert-manager:
    - v1.13.1
  f5/nginx-ingress:
    - 1.0.0
  intel/intel-device-plugins-operator:
    - 0.28.0
  intel/intel-device-plugins-qat:
    - 0.28.0
  intel/intel-device-plugins-sgx:
    - 0.28.0
  jenkins/jenkins:
    - 4.6.5
  kong/kong:
    - 2.28.0
  linkerd/linkerd-control-plane:
    - 1.16.2
  yugabyte/yugabyte:
    - 2.14.13
  yugabyte/yugaware:
    - 2.14.13
```
pull/893/head
github-actions[bot] 2023-09-27 13:46:53 +00:00
parent 324d3ffe99
commit 0a74378cf4
119 changed files with 1225 additions and 3057 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
assets/kong/kong-2.28.0.tgz Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -35,4 +35,4 @@ maintainers:
name: cassandra
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/cassandra
version: 10.5.3
version: 10.5.4

File diff suppressed because it is too large Load Diff

View File

@ -683,10 +683,10 @@ metrics:
##
image:
registry: docker.io
pullPolicy: IfNotPresent
repository: bitnami/cassandra-exporter
tag: 2.3.8-debian-11-r394
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/

View File

@ -10,7 +10,7 @@ annotations:
- name: postgres-exporter
image: docker.io/bitnami/postgres-exporter:0.14.0-debian-11-r2
- name: postgresql
image: docker.io/bitnami/postgresql:15.4.0-debian-11-r44
image: docker.io/bitnami/postgresql:15.4.0-debian-11-r45
licenses: Apache-2.0
apiVersion: v2
appVersion: 15.4.0
@ -38,4 +38,4 @@ maintainers:
name: postgresql
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/postgresql
version: 12.12.7
version: 12.12.10

View File

@ -100,7 +100,7 @@ kubectl delete pvc -l release=my-release
| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `image.registry` | PostgreSQL image registry | `docker.io` |
| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.4.0-debian-11-r44` |
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.4.0-debian-11-r45` |
| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify image pull secrets | `[]` |

View File

@ -38,12 +38,14 @@ spec:
annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 12 }}
{{- end }}
spec:
{{- include "postgresql.v1.imagePullSecrets" . | nindent 10 }}
{{- if .Values.backup.cronjob.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.nodeSelector "context" $) | nindent 12 }}
{{- end }}
containers:
- name: {{ include "postgresql.v1.primary.fullname" . }}-pgdumpall
image: {{ include "postgresql.v1.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
env:
- name: PGUSER
{{- if .Values.auth.enablePostgresUser }}
@ -64,7 +66,7 @@ spec:
- name: PGHOST
value: {{ include "postgresql.v1.primary.fullname" . }}
- name: PGPORT
value: {{ .Values.containerPorts.postgresql | quote }}
value: {{ include "postgresql.v1.service.port" . | quote }}
- name: PGDUMP_DIR
value: {{ .Values.backup.cronjob.storage.mountPath }}
{{- if .Values.tls.enabled }}

View File

@ -98,7 +98,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/postgresql
tag: 15.4.0-debian-11-r44
tag: 15.4.0-debian-11-r45
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'

View File

@ -6,11 +6,11 @@ annotations:
category: CMS
images: |
- name: apache-exporter
image: docker.io/bitnami/apache-exporter:1.0.1-debian-11-r53
image: docker.io/bitnami/apache-exporter:1.0.1-debian-11-r56
- name: os-shell
image: docker.io/bitnami/os-shell:11-debian-11-r75
image: docker.io/bitnami/os-shell:11-debian-11-r77
- name: wordpress
image: docker.io/bitnami/wordpress:6.3.1-debian-11-r21
image: docker.io/bitnami/wordpress:6.3.1-debian-11-r25
licenses: Apache-2.0
apiVersion: v2
appVersion: 6.3.1
@ -47,4 +47,4 @@ maintainers:
name: wordpress
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/wordpress
version: 17.1.10
version: 17.1.12

View File

@ -82,7 +82,7 @@ The command removes all the Kubernetes components associated with the chart and
| ------------------- | --------------------------------------------------------------------------------------------------------- | --------------------- |
| `image.registry` | WordPress image registry | `docker.io` |
| `image.repository` | WordPress image repository | `bitnami/wordpress` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.3.1-debian-11-r21` |
| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.3.1-debian-11-r25` |
| `image.digest` | WordPress image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` |
| `image.pullSecrets` | WordPress image pull secrets | `[]` |
@ -249,7 +249,7 @@ The command removes all the Kubernetes components associated with the chart and
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | OS Shell + Utility image registry | `docker.io` |
| `volumePermissions.image.repository` | OS Shell + Utility image repository | `bitnami/os-shell` |
| `volumePermissions.image.tag` | OS Shell + Utility image tag (immutable tags are recommended) | `11-debian-11-r75` |
| `volumePermissions.image.tag` | OS Shell + Utility image tag (immutable tags are recommended) | `11-debian-11-r77` |
| `volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` |
@ -281,7 +281,7 @@ The command removes all the Kubernetes components associated with the chart and
| `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` |
| `metrics.image.registry` | Apache exporter image registry | `docker.io` |
| `metrics.image.repository` | Apache exporter image repository | `bitnami/apache-exporter` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `1.0.1-debian-11-r53` |
| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `1.0.1-debian-11-r56` |
| `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` |

View File

@ -76,7 +76,7 @@ diagnosticMode:
image:
registry: docker.io
repository: bitnami/wordpress
tag: 6.3.1-debian-11-r21
tag: 6.3.1-debian-11-r25
digest: ""
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
@ -766,7 +766,7 @@ volumePermissions:
image:
registry: docker.io
repository: bitnami/os-shell
tag: 11-debian-11-r75
tag: 11-debian-11-r77
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -860,7 +860,7 @@ metrics:
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 1.0.1-debian-11-r53
tag: 1.0.1-debian-11-r56
digest: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.

View File

@ -10,7 +10,7 @@ annotations:
catalog.cattle.io/namespace: cert-manager
catalog.cattle.io/release-name: cert-manager
apiVersion: v1
appVersion: v1.13.0
appVersion: v1.13.1
description: A Helm chart for cert-manager
home: https://github.com/cert-manager/cert-manager
icon: https://raw.githubusercontent.com/cert-manager/cert-manager/d53c0b9270f8cd90d908460d69502694e1838f5f/logo/logo-small.png
@ -27,4 +27,4 @@ maintainers:
name: cert-manager
sources:
- https://github.com/cert-manager/cert-manager
version: v1.13.0
version: v1.13.1

View File

@ -19,7 +19,7 @@ Before installing the chart, you must first install the cert-manager CustomResou
This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources.
```bash
$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.crds.yaml
$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.1/cert-manager.crds.yaml
```
To install the chart with the release name `my-release`:
@ -29,7 +29,7 @@ To install the chart with the release name `my-release`:
$ helm repo add jetstack https://charts.jetstack.io
## Install the cert-manager helm chart
$ helm install my-release --namespace cert-manager --version v1.13.0 jetstack/cert-manager
$ helm install my-release --namespace cert-manager --version v1.13.1 jetstack/cert-manager
```
In order to begin issuing certificates, you will need to set up a ClusterIssuer
@ -65,7 +65,7 @@ If you want to completely uninstall cert-manager from your cluster, you will als
delete the previously installed CustomResourceDefinition resources:
```console
$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.crds.yaml
$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.1/cert-manager.crds.yaml
```
## Configuration
@ -86,7 +86,7 @@ The following table lists the configurable parameters of the cert-manager chart
| `global.leaderElection.retryPeriod` | The duration the clients should wait between attempting acquisition and renewal of a leadership | |
| `installCRDs` | If true, CRD resources will be installed as part of the Helm chart. If enabled, when uninstalling CRD resources will be deleted causing all installed custom resources to be DELETED | `false` |
| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` |
| `image.tag` | Image tag | `v1.13.0` |
| `image.tag` | Image tag | `v1.13.1` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `replicaCount` | Number of cert-manager replicas | `1` |
| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod |
@ -171,7 +171,7 @@ The following table lists the configurable parameters of the cert-manager chart
| `webhook.tolerations` | Node tolerations for webhook pod assignment | `[]` |
| `webhook.topologySpreadConstraints` | Topology spread constraints for webhook pod assignment | `[]` |
| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` |
| `webhook.image.tag` | Webhook image tag | `v1.13.0` |
| `webhook.image.tag` | Webhook image tag | `v1.13.1` |
| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` |
| `webhook.securePort` | The port that the webhook should listen on for requests. | `10250` |
| `webhook.securityContext` | Security context for webhook pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
@ -210,13 +210,13 @@ The following table lists the configurable parameters of the cert-manager chart
| `cainjector.tolerations` | Node tolerations for cainjector pod assignment | `[]` |
| `cainjector.topologySpreadConstraints` | Topology spread constraints for cainjector pod assignment | `[]` |
| `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` |
| `cainjector.image.tag` | cainjector image tag | `v1.13.0` |
| `cainjector.image.tag` | cainjector image tag | `v1.13.1` |
| `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` |
| `cainjector.securityContext` | Security context for cainjector pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
| `cainjector.containerSecurityContext` | Security context to be set on cainjector component container | refer to [Default Security Contexts](#default-security-contexts) |
| `cainjector.enableServiceLinks` | Indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. | `false` |
| `acmesolver.image.repository` | acmesolver image repository | `quay.io/jetstack/cert-manager-acmesolver` |
| `acmesolver.image.tag` | acmesolver image tag | `v1.13.0` |
| `acmesolver.image.tag` | acmesolver image tag | `v1.13.1` |
| `acmesolver.image.pullPolicy` | acmesolver image pull policy | `IfNotPresent` |
| `startupapicheck.enabled` | Toggles whether the startupapicheck Job should be installed | `true` |
| `startupapicheck.securityContext` | Security context for startupapicheck pod assignment | refer to [Default Security Contexts](#default-security-contexts) |
@ -232,7 +232,7 @@ The following table lists the configurable parameters of the cert-manager chart
| `startupapicheck.tolerations` | Node tolerations for startupapicheck pod assignment | `[]` |
| `startupapicheck.podLabels` | Optional additional labels to add to the startupapicheck Pods | `{}` |
| `startupapicheck.image.repository` | startupapicheck image repository | `quay.io/jetstack/cert-manager-ctl` |
| `startupapicheck.image.tag` | startupapicheck image tag | `v1.13.0` |
| `startupapicheck.image.tag` | startupapicheck image tag | `v1.13.1` |
| `startupapicheck.image.pullPolicy` | startupapicheck image pull policy | `IfNotPresent` |
| `startupapicheck.serviceAccount.create` | If `true`, create a new service account for the startupapicheck component | `true` |
| `startupapicheck.serviceAccount.name` | Service account for the startupapicheck component to be used. If not set and `startupapicheck.serviceAccount.create` is `true`, a name is generated using the fullname template | |

View File

@ -4,10 +4,10 @@ annotations:
catalog.cattle.io/kube-version: '>= 1.22.0-0'
catalog.cattle.io/release-name: nginx-ingress
apiVersion: v2
appVersion: 3.2.1
appVersion: 3.3.0
description: NGINX Ingress Controller
home: https://github.com/nginxinc/kubernetes-ingress
icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.2.1/deployments/helm-chart/chart-icon.png
icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.3.0/deployments/helm-chart/chart-icon.png
keywords:
- ingress
- nginx
@ -17,6 +17,6 @@ maintainers:
name: nginxinc
name: nginx-ingress
sources:
- https://github.com/nginxinc/kubernetes-ingress/tree/v3.2.1/deployments/helm-chart
- https://github.com/nginxinc/kubernetes-ingress/tree/v3.3.0/deployments/helm-chart
type: application
version: 0.18.1
version: 1.0.0

View File

@ -6,20 +6,35 @@ This chart deploys the NGINX Ingress Controller in your Kubernetes cluster.
## Prerequisites
- A [Kubernetes Version Supported by the Ingress Controller](https://docs.nginx.com/nginx-ingress-controller/technical-specifications/#supported-kubernetes-versions)
- A [Kubernetes Version Supported by the Ingress
Controller](https://docs.nginx.com/nginx-ingress-controller/technical-specifications/#supported-kubernetes-versions)
- Helm 3.0+.
- If you'd like to use NGINX Plus:
- To pull from the F5 Container registry, configure a docker registry secret using your JWT token from the MyF5 portal by following the instructions from [here](https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret). Make sure to specify the secret using `controller.serviceAccount.imagePullSecretName` parameter.
- Alternatively, pull an Ingress Controller image with NGINX Plus and push it to your private registry by following the instructions from [here](https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image).
- Alternatively, you can build an Ingress Controller image with NGINX Plus and push it to your private registry by following the instructions from [here](https://docs.nginx.com/nginx-ingress-controller/installation/building-ingress-controller-image).
- To pull from the F5 Container registry, configure a docker registry secret using your JWT token from the MyF5 portal
by following the instructions from
[here](https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret). Make sure to
specify the secret using `controller.serviceAccount.imagePullSecretName` parameter.
- Alternatively, pull an Ingress Controller image with NGINX Plus and push it to your private registry by following
the instructions from
[here](https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image).
- Alternatively, you can build an Ingress Controller image with NGINX Plus and push it to your private registry by
following the instructions from
[here](https://docs.nginx.com/nginx-ingress-controller/installation/building-ingress-controller-image).
- Update the `controller.image.repository` field of the `values-plus.yaml` accordingly.
- If you'd like to use App Protect DoS, please install App Protect DoS Arbitrator [helm chart](https://github.com/nginxinc/nap-dos-arbitrator-helm-chart). Make sure to install in the same namespace as the NGINX Ingress Controller. Note that if you install multiple NGINX Ingress Controllers in the same namespace, they will need to share the same Arbitrator because it is not possible to install more than one Arbitrator in a single namespace.
- If you'd like to use App Protect DoS, please install App Protect DoS Arbitrator [helm
chart](https://github.com/nginxinc/nap-dos-arbitrator-helm-chart). Make sure to install in the same namespace as the
NGINX Ingress Controller. Note that if you install multiple NGINX Ingress Controllers in the same namespace, they will
need to share the same Arbitrator because it is not possible to install more than one Arbitrator in a single
namespace.
## CRDs
By default, the Ingress Controller requires a number of custom resource definitions (CRDs) installed in the cluster. The Helm client will install those CRDs. If the CRDs are not installed, the Ingress Controller pods will not become `Ready`.
By default, the Ingress Controller requires a number of custom resource definitions (CRDs) installed in the cluster. The
Helm client will install those CRDs. If the CRDs are not installed, the Ingress Controller pods will not become `Ready`.
If you do not use the custom resources that require those CRDs (which corresponds to `controller.enableCustomResources` set to `false` and `controller.appprotect.enable` set to `false` and `controller.appprotectdos.enable` set to `false`), the installation of the CRDs can be skipped by specifying `--skip-crds` for the helm install command.
If you do not use the custom resources that require those CRDs (which corresponds to `controller.enableCustomResources`
set to `false` and `controller.appprotect.enable` set to `false` and `controller.appprotectdos.enable` set to `false`),
the installation of the CRDs can be skipped by specifying `--skip-crds` for the helm install command.
### Upgrading the CRDs
@ -31,9 +46,11 @@ kubectl apply -f crds/
> **Note**
>
> The following warning is expected and can be ignored: `Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply`.
> The following warning is expected and can be ignored: `Warning: kubectl apply should be used on resource created by
> either kubectl create --save-config or kubectl apply`.
>
> Make sure to check the [release notes](https://www.github.com/nginxinc/kubernetes-ingress/releases) for a new release for any special upgrade procedures.
> Make sure to check the [release notes](https://www.github.com/nginxinc/kubernetes-ingress/releases) for a new release
> for any special upgrade procedures.
### Uninstalling the CRDs
@ -45,7 +62,9 @@ kubectl delete -f crds/
> **Note**
>
> This command will delete all the corresponding custom resources in your cluster across all namespaces. Please ensure there are no custom resources that you want to keep and there are no other Ingress Controller releases running in the cluster.
> This command will delete all the corresponding custom resources in your cluster across all namespaces. Please ensure
> there are no custom resources that you want to keep and there are no other Ingress Controller releases running in the
> cluster.
## Managing the Chart via OCI Registry
@ -56,25 +75,29 @@ To install the chart with the release name my-release (my-release is the name th
For NGINX:
```console
helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.18.1
helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.0
```
For NGINX Plus: (assuming you have pushed the Ingress Controller image `nginx-plus-ingress` to your private registry `myregistry.example.com`)
For NGINX Plus: (assuming you have pushed the Ingress Controller image `nginx-plus-ingress` to your private registry
`myregistry.example.com`)
```console
helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.18.1 --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true
helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.0 --set controller.image.repository=myregistry.example.com/nginx-plus-ingress --set controller.nginxplus=true
```
This will install the latest `edge` version of the Ingress Controller from GitHub Container Registry. If you prefer to use Docker Hub, you can replace `ghcr.io/nginxinc/charts/nginx-ingress` with `registry-1.docker.io/nginxcharts/nginx-ingress`.
This will install the latest `edge` version of the Ingress Controller from GitHub Container Registry. If you prefer to
use Docker Hub, you can replace `ghcr.io/nginxinc/charts/nginx-ingress` with
`registry-1.docker.io/nginxcharts/nginx-ingress`.
### Upgrading the Chart
Helm does not upgrade the CRDs during a release upgrade. Before you upgrade a release, see [Upgrading the CRDs](#upgrading-the-crds).
Helm does not upgrade the CRDs during a release upgrade. Before you upgrade a release, see [Upgrading the
CRDs](#upgrading-the-crds).
To upgrade the release `my-release`:
```console
helm upgrade my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.18.1
helm upgrade my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 1.0.0
```
### Uninstalling the Chart
@ -87,12 +110,14 @@ helm uninstall my-release
The command removes all the Kubernetes components associated with the release and deletes the release.
Uninstalling the release does not remove the CRDs. To remove the CRDs, see [Uninstalling the CRDs](#uninstalling-the-crds).
Uninstalling the release does not remove the CRDs. To remove the CRDs, see [Uninstalling the
CRDs](#uninstalling-the-crds).
### Edge Version
To test the latest changes in NGINX Ingress Controller before a new release, you can install the `edge` version. This version is built from the `main` branch of the NGINX Ingress Controller repository.
You can install the `edge` version by specifying the `--version` flag with the value `0.0.0-edge`:
To test the latest changes in NGINX Ingress Controller before a new release, you can install the `edge` version. This
version is built from the `main` branch of the NGINX Ingress Controller repository. You can install the `edge` version
by specifying the `--version` flag with the value `0.0.0-edge`:
```console
helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.0.0-edge
@ -106,12 +131,14 @@ helm install my-release oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.
### Pulling the Chart
This step is required if you're installing the chart using its sources. Additionally, the step is also required for managing the custom resource definitions (CRDs), which the Ingress Controller requires by default, or for upgrading/deleting the CRDs.
This step is required if you're installing the chart using its sources. Additionally, the step is also required for
managing the custom resource definitions (CRDs), which the Ingress Controller requires by default, or for
upgrading/deleting the CRDs.
1. Pull the chart sources:
```console
helm pull oci://ghcr.io/nginxinc/charts/nginx-ingress --untar --version 0.18.1
helm pull oci://ghcr.io/nginxinc/charts/nginx-ingress --untar --version 1.0.0
```
2. Change your working directory to nginx-ingress:
@ -136,11 +163,13 @@ For NGINX Plus:
helm install my-release -f values-plus.yaml .
```
The command deploys the Ingress Controller in your Kubernetes cluster in the default configuration. The configuration section lists the parameters that can be configured during installation.
The command deploys the Ingress Controller in your Kubernetes cluster in the default configuration. The configuration
section lists the parameters that can be configured during installation.
### Upgrading the Chart
Helm does not upgrade the CRDs during a release upgrade. Before you upgrade a release, see [Upgrading the CRDs](#upgrading-the-crds).
Helm does not upgrade the CRDs during a release upgrade. Before you upgrade a release, see [Upgrading the
CRDs](#upgrading-the-crds).
To upgrade the release `my-release`:
@ -158,13 +187,18 @@ helm uninstall my-release
The command removes all the Kubernetes components associated with the release and deletes the release.
Uninstalling the release does not remove the CRDs. To remove the CRDs, see [Uninstalling the CRDs](#uninstalling-the-crds).
Uninstalling the release does not remove the CRDs. To remove the CRDs, see [Uninstalling the
CRDs](#uninstalling-the-crds).
## Running Multiple Ingress Controllers
If you are running multiple Ingress Controller releases in your cluster with enabled custom resources, the releases will share a single version of the CRDs. As a result, make sure that the Ingress Controller versions match the version of the CRDs. Additionally, when uninstalling a release, ensure that you don't remove the CRDs until there are no other Ingress Controller releases running in the cluster.
If you are running multiple Ingress Controller releases in your cluster with enabled custom resources, the releases will
share a single version of the CRDs. As a result, make sure that the Ingress Controller versions match the version of the
CRDs. Additionally, when uninstalling a release, ensure that you don't remove the CRDs until there are no other Ingress
Controller releases running in the cluster.
See [running multiple Ingress Controllers](https://docs.nginx.com/nginx-ingress-controller/installation/running-multiple-ingress-controllers/) for more details.
See [running multiple Ingress Controllers](https://docs.nginx.com/nginx-ingress-controller/installation/running-multiple-ingress-controllers/)
for more details.
## Configuration
@ -183,7 +217,7 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont
|`controller.logLevel` | The log level of the Ingress Controller. | 1 |
|`controller.image.digest` | The image digest of the Ingress Controller. | None |
|`controller.image.repository` | The image repository of the Ingress Controller. | nginx/nginx-ingress |
|`controller.image.tag` | The tag of the Ingress Controller image. | 3.2.1 |
|`controller.image.tag` | The tag of the Ingress Controller image. | 3.3.0 |
|`controller.image.pullPolicy` | The pull policy for the Ingress Controller image. | IfNotPresent |
|`controller.lifecycle` | The lifecycle of the Ingress Controller pods. | {} |
|`controller.customConfigMap` | The name of the custom ConfigMap used by the Ingress Controller. If set, then the default config is ignored. | "" |
@ -209,15 +243,17 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont
|`controller.extraContainers` | Extra (eg. sidecar) containers for the Ingress Controller pods. | [] |
|`controller.resources` | The resources of the Ingress Controller pods. | requests: cpu=100m,memory=128Mi |
|`controller.replicaCount` | The number of replicas of the Ingress Controller deployment. | 1 |
|`controller.ingressClass` | A class of the Ingress Controller. An IngressClass resource with the name equal to the class must be deployed. Otherwise, the Ingress Controller will fail to start. The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class. The Ingress Controller processes all the VirtualServer/VirtualServerRoute/TransportServer resources that do not have the "ingressClassName" field for all versions of kubernetes. | nginx |
|`controller.setAsDefaultIngress` | New Ingresses without an `"ingressClassName"` field specified will be assigned the class specified in `controller.ingressClass`. | false |
|`controller.ingressClass.name` | A class of the Ingress Controller. An IngressClass resource with the name equal to the class must be deployed. Otherwise, the Ingress Controller will fail to start. The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class. The Ingress Controller processes all the VirtualServer/VirtualServerRoute/TransportServer resources that do not have the "ingressClassName" field for all versions of Kubernetes. | nginx |
|`controller.ingressClass.create` | Creates a new IngressClass object with the name `controller.ingressClass.name`. Set to `false` to use an existing ingressClass created using `kubectl` with the same name. If you use `helm upgrade`, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.3.0, do not set the value to false. | true |
|`controller.ingressClass.setAsDefaultIngress` | New Ingresses without an `"ingressClassName"` field specified will be assigned the class specified in `controller.ingressClass.name`. Requires `controller.ingressClass.create`. | false |
|`controller.watchNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchNamespace="default\,nginx-ingress"`. | "" |
|`controller.watchNamespaceLabel` | Configures the Ingress Controller to watch only those namespaces with label foo=bar. By default the Ingress Controller watches all namespaces. Mutually exclusive with `controller.watchNamespace`. | "" |
|`controller.watchSecretNamespace` | Comma separated list of namespaces the Ingress Controller should watch for resources of type Secret. If this arg is not configured, the Ingress Controller watches the same namespaces for all resources. See `controller.watchNamespace` and `controller.watchNamespaceLabel`. Please note that if configuring multiple namespaces using the Helm cli `--set` option, the string needs to wrapped in double quotes and the commas escaped using a backslash - e.g. `--set controller.watchSecretNamespace="default\,nginx-ingress"`. | "" |
|`controller.enableCustomResources` | Enable the custom resources. | true |
|`controller.enablePreviewPolicies` | Enable preview policies. This parameter is deprecated. To enable OIDC Policies please use `controller.enableOIDC` instead. | false |
|`controller.enableOIDC` | Enable OIDC policies. | false |
|`controller.enableTLSPassthrough` | Enable TLS Passthrough on port 443. Requires `controller.enableCustomResources`. | false |
|`controller.enableTLSPassthrough` | Enable TLS Passthrough on default port 443. Requires `controller.enableCustomResources`. | false |
|`controller.tlsPassThroughPort` | Set the port for the TLS Passthrough. Requires `controller.enableCustomResources` and `controller.enableTLSPassthrough`. | 443 |
|`controller.enableCertManager` | Enable x509 automated certificate management for VirtualServer resources using cert-manager (cert-manager.io). Requires `controller.enableCustomResources`. | false |
|`controller.enableExternalDNS` | Enable integration with ExternalDNS for configuring public DNS entries for VirtualServer resources using [ExternalDNS](https://github.com/kubernetes-sigs/external-dns). Requires `controller.enableCustomResources`. | false |
|`controller.globalConfiguration.create` | Creates the GlobalConfiguration custom resource. Requires `controller.enableCustomResources`. | false |
@ -236,6 +272,7 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont
|`controller.service.extraLabels` | The extra labels of the service. | {} |
|`controller.service.loadBalancerIP` | The static IP address for the load balancer. Requires `controller.service.type` set to `LoadBalancer`. The cloud provider must support this feature. | "" |
|`controller.service.externalIPs` | The list of external IPs for the Ingress Controller service. | [] |
|`controller.service.clusterIP` | The clusterIP for the Ingress Controller service, auto-assigned if not specified. | "" |
|`controller.service.loadBalancerSourceRanges` | The IP ranges (CIDR) that are allowed to access the load balancer. Requires `controller.service.type` set to `LoadBalancer`. The cloud provider must support this feature. | [] |
|`controller.service.name` | The name of the service. | Autogenerated |
|`controller.service.customPorts` | A list of custom ports to expose through the Ingress Controller service. Follows the conventional Kubernetes yaml syntax for service ports. | [] |
@ -301,5 +338,7 @@ The following tables lists the configurable parameters of the NGINX Ingress Cont
## Notes
- The values-icp.yaml file is used for deploying the Ingress Controller on IBM Cloud Private. See the [blog post](https://www.nginx.com/blog/nginx-ingress-controller-ibm-cloud-private/) for more details.
- The values-nsm.yaml file is used for deploying the Ingress Controller with NGINX Service Mesh. See the NGINX Service Mesh [docs](https://docs.nginx.com/nginx-service-mesh/tutorials/kic/deploy-with-kic/) for more details.
- The values-icp.yaml file is used for deploying the Ingress Controller on IBM Cloud Private. See the [blog
post](https://www.nginx.com/blog/nginx-ingress-controller-ibm-cloud-private/) for more details.
- The values-nsm.yaml file is used for deploying the Ingress Controller with NGINX Service Mesh. See the NGINX Service
Mesh [docs](https://docs.nginx.com/nginx-service-mesh/tutorials/kic/deploy-with-kic/) for more details.

View File

@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: dosprotectedresources.appprotectdos.f5.com
spec:
group: appprotectdos.f5.com

View File

@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: dnsendpoints.externaldns.nginx.org
spec:
group: externaldns.nginx.org

View File

@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: globalconfigurations.k8s.nginx.org
spec:
group: k8s.nginx.org
@ -45,5 +45,7 @@ spec:
type: integer
protocol:
type: string
ssl:
type: boolean
served: true
storage: true

View File

@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: policies.k8s.nginx.org
spec:
group: k8s.nginx.org

View File

@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: transportservers.k8s.nginx.org
spec:
group: k8s.nginx.org

View File

@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: virtualserverroutes.k8s.nginx.org
spec:
group: k8s.nginx.org

View File

@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: virtualservers.k8s.nginx.org
spec:
group: k8s.nginx.org
@ -97,6 +97,14 @@ spec:
internalRoute:
description: InternalRoute allows for the configuration of internal routing.
type: boolean
listener:
description: Listener references a custom http and/or https listener defined in GlobalConfiguration.
type: object
properties:
http:
type: string
https:
type: string
policies:
type: array
items:

View File

@ -64,9 +64,13 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
Selector labels
*/}}
{{- define "nginx-ingress.selectorLabels" -}}
{{- if .Values.controller.selectorLabels -}}
{{ toYaml .Values.controller.selectorLabels }}
{{- else -}}
app.kubernetes.io/name: {{ include "nginx-ingress.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- end -}}
{{- end -}}
{{/*
Expand the name of the configmap.
@ -125,3 +129,7 @@ Expand image name.
{{- printf "%s:%s" .Values.controller.image.repository (include "nginx-ingress.tag" .) -}}
{{- end -}}
{{- end -}}
{{- define "nginx-ingress.prometheus.serviceName" -}}
{{- printf "%s-%s" (include "nginx-ingress.fullname" .) "prometheus-service" -}}
{{- end -}}

View File

@ -85,12 +85,14 @@ spec:
{{ toYaml .Values.controller.lifecycle | indent 10 }}
{{- end }}
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
{{- range $key, $value := .Values.controller.containerPort }}
- name: {{ $key }}
containerPort: {{ $value }}
protocol: TCP
{{- if and $.Values.controller.hostPort.enable (index $.Values.controller.hostPort $key) }}
hostPort: {{ index $.Values.controller.hostPort $key }}
{{- end }}
{{- end }}
{{ if .Values.controller.customPorts }}
{{ toYaml .Values.controller.customPorts | indent 8 }}
{{ end }}
@ -178,7 +180,7 @@ spec:
{{ else if and (.Values.controller.defaultTLS.cert) (.Values.controller.defaultTLS.key) }}
- -default-server-tls-secret=$(POD_NAMESPACE)/{{ include "nginx-ingress.defaultTLSName" . }}
{{- end }}
- -ingress-class={{ .Values.controller.ingressClass }}
- -ingress-class={{ .Values.controller.ingressClass.name }}
{{- if .Values.controller.watchNamespace }}
- -watch-namespace={{ .Values.controller.watchNamespace }}
{{- end }}
@ -228,6 +230,9 @@ spec:
- -disable-ipv6={{ .Values.controller.disableIPV6 }}
{{- if .Values.controller.enableCustomResources }}
- -enable-tls-passthrough={{ .Values.controller.enableTLSPassthrough }}
{{ if .Values.controller.enableTLSPassthrough }}
- -tls-passthrough-port={{ .Values.controller.tlsPassthroughPort }}
{{ end }}
- -enable-preview-policies={{ .Values.controller.enablePreviewPolicies }}
- -enable-cert-manager={{ .Values.controller.enableCertManager }}
- -enable-oidc={{ .Values.controller.enableOIDC }}

View File

@ -92,10 +92,14 @@ spec:
{{ toYaml .Values.controller.lifecycle | indent 10 }}
{{- end }}
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
{{- range $key, $value := .Values.controller.containerPort }}
- name: {{ $key }}
containerPort: {{ $value }}
protocol: TCP
{{- if and $.Values.controller.hostPort.enable (index $.Values.controller.hostPort $key) }}
hostPort: {{ index $.Values.controller.hostPort $key }}
{{- end }}
{{- end }}
{{- if .Values.controller.customPorts }}
{{ toYaml .Values.controller.customPorts | indent 8 }}
{{- end }}
@ -183,7 +187,7 @@ spec:
{{ else if and (.Values.controller.defaultTLS.cert) (.Values.controller.defaultTLS.key) }}
- -default-server-tls-secret=$(POD_NAMESPACE)/{{ include "nginx-ingress.defaultTLSName" . }}
{{- end }}
- -ingress-class={{ .Values.controller.ingressClass }}
- -ingress-class={{ .Values.controller.ingressClass.name }}
{{- if .Values.controller.watchNamespace }}
- -watch-namespace={{ .Values.controller.watchNamespace }}
{{- end }}
@ -233,6 +237,9 @@ spec:
- -disable-ipv6={{ .Values.controller.disableIPV6 }}
{{- if .Values.controller.enableCustomResources }}
- -enable-tls-passthrough={{ .Values.controller.enableTLSPassthrough }}
{{ if .Values.controller.enableTLSPassthrough }}
- -tls-passthrough-port={{ .Values.controller.tlsPassthroughPort }}
{{ end }}
- -enable-preview-policies={{ .Values.controller.enablePreviewPolicies }}
- -enable-cert-manager={{ .Values.controller.enableCertManager }}
- -enable-oidc={{ .Values.controller.enableOIDC }}

View File

@ -1,4 +1,4 @@
{{- if and .Values.controller.autoscaling.enabled (eq .Values.controller.kind "deployment") (semverCompare ">=1.23.0" .Capabilities.KubeVersion.Version) -}}
{{- if and .Values.controller.autoscaling.enabled (eq .Values.controller.kind "deployment") (.Capabilities.APIVersions.Has "autoscaling/v2") -}}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:

View File

@ -1,12 +1,14 @@
{{ if .Values.controller.ingressClass.create }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
name: {{ .Values.controller.ingressClass }}
name: {{ .Values.controller.ingressClass.name }}
labels:
{{- include "nginx-ingress.labels" . | nindent 4 }}
{{- if .Values.controller.setAsDefaultIngress }}
{{- if .Values.controller.ingressClass.setAsDefaultIngress }}
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
{{- end }}
spec:
controller: nginx.org/ingress-controller
{{ end }}

View File

@ -0,0 +1,21 @@
{{- if and .Values.prometheus.create .Values.prometheus.service.create}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "nginx-ingress.prometheus.serviceName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "nginx-ingress.labels" . | nindent 4 }}
{{- if .Values.prometheus.service.labels -}}
{{- toYaml .Values.prometheus.service.labels | nindent 4 }}
{{- end }}
spec:
clusterIP: None
ports:
- name: prometheus
protocol: TCP
port: {{ .Values.prometheus.port }}
targetPort: {{ .Values.prometheus.port }}
selector:
{{- include "nginx-ingress.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -14,6 +14,9 @@ metadata:
{{ toYaml .Values.controller.service.annotations | indent 4 }}
{{- end }}
spec:
{{- if .Values.controller.service.clusterIP }}
clusterIP: {{ .Values.controller.service.clusterIP }}
{{- end }}
{{- if or (eq .Values.controller.service.type "LoadBalancer") (eq .Values.controller.service.type "NodePort") }}
{{- if .Values.controller.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }}

View File

@ -1,4 +1,4 @@
{{- if .Values.controller.serviceMonitor.create }}
{{- if .Values.prometheus.serviceMonitor.create }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@ -6,16 +6,16 @@ metadata:
namespace: {{ .Release.Namespace }}
labels:
{{- include "nginx-ingress.labels" . | nindent 4 }}
{{- if .Values.controller.serviceMonitor.labels -}}
{{- toYaml .Values.controller.serviceMonitor.labels | nindent 4 }}
{{- if .Values.prometheus.serviceMonitor.labels -}}
{{- toYaml .Values.prometheus.serviceMonitor.labels | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- if .Values.controller.serviceMonitor.selectorMatchLabels -}}
{{- toYaml .Values.controller.serviceMonitor.selectorMatchLabels | nindent 6 }}
{{- if .Values.prometheus.serviceMonitor.selectorMatchLabels -}}
{{- toYaml .Values.prometheus.serviceMonitor.selectorMatchLabels | nindent 6 }}
{{- end }}
{{- include "nginx-ingress.selectorLabels" . | nindent 6 }}
endpoints:
{{- toYaml .Values.controller.serviceMonitor.endpoints | nindent 4 }}
{{- toYaml .Values.prometheus.serviceMonitor.endpoints | nindent 4 }}
{{- end }}

View File

@ -4,7 +4,7 @@ controller:
nginxplus: true
image:
repository: mycluster.icp:8500/kube-system/nginx-plus-ingress
tag: "3.2.1"
tag: "3.3.0"
nodeSelector:
beta.kubernetes.io/arch: "amd64"
proxy: true

View File

@ -3,4 +3,4 @@ controller:
nginxplus: true
image:
repository: nginx-plus-ingress
tag: "3.2.1"
tag: "3.3.0"

View File

@ -42,11 +42,17 @@
"daemonset"
]
},
"selectorLabels": {
"type": "object",
"default": {},
"title": "The selectorLabels Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector/properties/matchLabels"
},
"annotations": {
"type": "object",
"default": {},
"title": "The annotations Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
},
"nginxplus": {
"type": "boolean",
@ -181,11 +187,40 @@
true
]
},
"hostPort": {
"type": "object",
"default": {},
"title": "The hostPort Schema",
"patternProperties": {
"^.*$": {
"anyOf": [
{
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort/properties/hostPort"
},
{
"type": "boolean"
}
]
}
},
"additionalProperties": false
},
"containerPort": {
"type": "object",
"default": {},
"title": "The containerPort Schema",
"patternProperties": {
"^.*$": {
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort/properties/containerPort"
}
},
"additionalProperties": false
},
"dnsPolicy": {
"type": "string",
"allOf": [
{
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/dnsPolicy"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/dnsPolicy"
},
{
"enum": [
@ -226,7 +261,7 @@
"title": "The customPorts to expose on the NGINX Ingress Controller pod",
"items": {
"type": "object",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ContainerPort"
},
"examples": [
[
@ -281,7 +316,7 @@
"title": "The pullPolicy for the Ingress Controller image",
"allOf": [
{
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Container/properties/imagePullPolicy"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Container/properties/imagePullPolicy"
},
{
"enum": [
@ -310,7 +345,7 @@
"type": "object",
"default": {},
"title": "The lifecycle Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Lifecycle"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Lifecycle"
},
"customConfigMap": {
"type": "string",
@ -338,7 +373,7 @@
"type": "object",
"default": {},
"title": "The annotations Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
},
"entries": {
"type": "object",
@ -425,19 +460,19 @@
"type": "object",
"default": {},
"title": "The nodeSelector Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/nodeSelector"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/nodeSelector"
},
"terminationGracePeriodSeconds": {
"type": "integer",
"default": 30,
"title": "The terminationGracePeriodSeconds Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/terminationGracePeriodSeconds"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/terminationGracePeriodSeconds"
},
"resources": {
"type": "object",
"default": {},
"title": "The resources Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ResourceRequirements"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ResourceRequirements"
},
"tolerations": {
"type": "array",
@ -445,20 +480,20 @@
"title": "The tolerations Schema",
"items": {
"type": "object",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Toleration"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Toleration"
}
},
"affinity": {
"type": "object",
"default": {},
"title": "The affinity Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Affinity"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Affinity"
},
"topologySpreadConstraints": {
"type": "object",
"default": {},
"title": "The topologySpreadConstraints Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/topologySpreadConstraints"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/topologySpreadConstraints"
},
"env": {
"type": "array",
@ -466,7 +501,7 @@
"title": "The env Schema",
"items": {
"type": "object",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.EnvVar"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.EnvVar"
}
},
"volumes": {
@ -475,7 +510,7 @@
"title": "The volumes Schema",
"items": {
"type": "object",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Volume"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Volume"
}
},
"volumeMounts": {
@ -484,7 +519,7 @@
"title": "The volumeMounts Schema",
"items": {
"type": "object",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.VolumeMount"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.VolumeMount"
}
},
"initContainers": {
@ -493,14 +528,14 @@
"title": "The initContainers Schema",
"items": {
"type": "object",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Container"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Container"
}
},
"minReadySeconds": {
"type": "integer",
"default": 0,
"title": "The minReadySeconds Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentSpec/properties/minReadySeconds"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentSpec/properties/minReadySeconds"
},
"strategy": {
"type": "object",
@ -508,7 +543,7 @@
"title": "The strategy Schema",
"allOf": [
{
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentStrategy"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.apps.v1.DeploymentStrategy"
},
{
"properties": {
@ -530,7 +565,7 @@
"title": "The extraContainers Schema",
"items": {
"type": "object",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Container"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Container"
}
},
"replicaCount": {
@ -542,20 +577,36 @@
]
},
"ingressClass": {
"type": "string",
"default": "",
"type": "object",
"default": {},
"title": "The ingressClass",
"examples": [
"nginx"
]
},
"setAsDefaultIngress": {
"type": "boolean",
"default": false,
"title": "The setAsDefaultIngress",
"examples": [
false
]
"required": [],
"properties": {
"create": {
"type": "boolean",
"default": true,
"title": "The create",
"examples": [
true
]
},
"name": {
"type": "string",
"default": "",
"title": "The ingressClass name",
"examples": [
"nginx"
]
},
"setAsDefaultIngress": {
"type": "boolean",
"default": false,
"title": "The setAsDefaultIngress",
"examples": [
false
]
}
}
},
"watchNamespace": {
"type": "string",
@ -613,6 +664,14 @@
false
]
},
"tlsPassthroughPort": {
"type": "integer",
"default": 443,
"title": "The tlsPassthroughPort",
"examples": [
443
]
},
"enableCertManager": {
"type": "boolean",
"default": false,
@ -782,19 +841,19 @@
"type": "string",
"default": "",
"title": "The type",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/type"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/type"
},
"externalTrafficPolicy": {
"type": "string",
"default": "",
"title": "The externalTrafficPolicy",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalTrafficPolicy"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalTrafficPolicy"
},
"annotations": {
"type": "object",
"default": {},
"title": "The annotations",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
},
"extraLabels": {
"type": "object",
@ -810,13 +869,13 @@
"type": "string",
"default": "",
"title": "The loadBalancerIP",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/loadBalancerIP"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/loadBalancerIP"
},
"externalIPs": {
"type": "array",
"default": [],
"title": "The externalIPs",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalIPs"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/externalIPs"
},
"loadBalancerSourceRanges": {
"type": "array",
@ -831,13 +890,13 @@
"type": "boolean",
"default": false,
"title": "The allocateLoadBalancerNodePorts Schema",
"ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/allocateLoadBalancerNodePorts"
"ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/allocateLoadBalancerNodePorts"
},
"ipFamilyPolicy": {
"type": "string",
"default": "",
"title": "The ipFamilyPolicy Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilyPolicy",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilyPolicy",
"examples": [
""
]
@ -846,7 +905,7 @@
"type": "array",
"default": [],
"title": "The ipFamilies Schema",
"ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilies"
"ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServiceSpec/properties/ipFamilies"
},
"httpPort": {
"type": "object",
@ -950,7 +1009,7 @@
"title": "The customPorts",
"items": {
"type": "object",
"ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.ServicePort"
"ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.ServicePort"
}
}
},
@ -992,7 +1051,7 @@
"type": "object",
"default": {},
"title": "The annotations Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
},
"name": {
"type": "string",
@ -1018,49 +1077,6 @@
}
]
},
"serviceMonitor": {
"type": "object",
"default": {},
"title": "The serviceMonitor Schema",
"required": [],
"properties": {
"create": {
"type": "boolean",
"default": false,
"title": "The create",
"examples": [
false
]
},
"labels": {
"type": "object",
"default": {},
"title": "The labels Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels"
},
"selectorMatchLabels": {
"type": "object",
"default": {},
"title": "The selectorMatchLabels Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector/properties/matchLabels"
},
"endpoints": {
"type": "array",
"default": [],
"title": "The endpoints",
"required": [],
"items": {}
}
},
"examples": [
{
"create": false,
"labels": {},
"selectorMatchLabels": {},
"endpoints": []
}
]
},
"reportIngressStatus": {
"type": "object",
"default": {},
@ -1113,7 +1129,7 @@
"type": "object",
"default": {},
"title": "The annotations Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
}
},
"examples": [
@ -1137,13 +1153,13 @@
"type": "object",
"default": {},
"title": "The annotations Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
},
"extraLabels": {
"type": "object",
"default": {},
"title": "The extraLabels Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels"
}
},
"examples": [
@ -1157,7 +1173,7 @@
"type": "string",
"default": "",
"title": "The priorityClassName",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/priorityClassName"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.PodSpec/properties/priorityClassName"
},
"podDisruptionBudget": {
"type": "object",
@ -1174,13 +1190,13 @@
"type": "object",
"default": {},
"title": "The annotations Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
},
"minAvailable": {
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/minAvailable"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/minAvailable"
},
"maxUnavailable": {
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/maxUnavailable"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec/properties/maxUnavailable"
}
},
"examples": [
@ -1219,7 +1235,7 @@
"initialDelaySeconds": {
"type": "integer",
"default": 0,
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.26.1/_definitions.json#/definitions/io.k8s.api.core.v1.Probe/properties/initialDelaySeconds"
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.api.core.v1.Probe/properties/initialDelaySeconds"
}
},
"examples": [
@ -1326,6 +1342,7 @@
"enableOIDC": false,
"includeYear": false,
"enableTLSPassthrough": false,
"tlsPassthroughPort": 443,
"enableCertManager": false,
"enableExternalDNS": false,
"globalConfiguration": {
@ -1460,6 +1477,69 @@
"examples": [
"http"
]
},
"service": {
"type": "object",
"default": {},
"properties": {
"create": {
"type": "boolean",
"default": false,
"title": "The create",
"examples": [
true
]
},
"labels": {
"type": "object",
"default": {},
"title": "The labels Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels"
}
}
},
"serviceMonitor": {
"type": "object",
"default": {},
"title": "The serviceMonitor Schema",
"required": [],
"properties": {
"create": {
"type": "boolean",
"default": false,
"title": "The create",
"examples": [
false
]
},
"labels": {
"type": "object",
"default": {},
"title": "The labels Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels"
},
"selectorMatchLabels": {
"type": "object",
"default": {},
"title": "The selectorMatchLabels Schema",
"$ref": "https://raw.githubusercontent.com/nginxinc/kubernetes-json-schema/master/v1.27.4/_definitions.json#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector/properties/matchLabels"
},
"endpoints": {
"type": "array",
"default": [],
"title": "The endpoints",
"required": [],
"items": {}
}
},
"examples": [
{
"create": false,
"labels": {},
"selectorMatchLabels": {},
"endpoints": []
}
]
}
},
"examples": [
@ -1676,12 +1756,6 @@
"minAvailable": 0,
"minUnavailable": 0
},
"serviceMonitor": {
"create": false,
"labels": {},
"selectorMatchLabels": {},
"endpoints": {}
},
"reportIngressStatus": {
"enable": true,
"externalService": "",
@ -1711,7 +1785,17 @@
"create": true,
"port": 9113,
"secret": "",
"scheme": "http"
"scheme": "http",
"service": {
"create": false,
"labels": {}
},
"serviceMonitor": {
"create": false,
"labels": {},
"selectorMatchLabels": {},
"endpoints": {}
}
},
"serviceInsight": {
"create": true,

View File

@ -5,6 +5,9 @@ controller:
## The kind of the Ingress Controller installation - deployment or daemonset.
kind: deployment
## The selectorLabels used to override the default values.
selectorLabels: {}
## Annotations for deployments and daemonsets
annotations: {}
@ -37,6 +40,24 @@ controller:
## Enables the Ingress Controller pods to use the host's network namespace.
hostNetwork: false
## The hostPort configuration for the Ingress Controller pods.
hostPort:
## Enables hostPort for the Ingress Controller pods.
enable: false
## The HTTP hostPort configuration for the Ingress Controller pods.
http: 80
## The HTTPS hostPort configuration for the Ingress Controller pods.
https: 443
containerPort:
## The HTTP containerPort configuration for the Ingress Controller pods.
http: 80
## The HTTPS containerPort configuration for the Ingress Controller pods.
https: 443
## DNS policy for the Ingress Controller pods
dnsPolicy: ClusterFirst
@ -54,7 +75,7 @@ controller:
repository: nginx/nginx-ingress
## The tag of the Ingress Controller image. If not specified the appVersion from Chart.yaml is used as a tag.
# tag: "3.2.1"
# tag: "3.3.0"
## The digest of the Ingress Controller image.
## If digest is specified it has precedence over tag and will be used instead
@ -82,11 +103,11 @@ controller:
## It is recommended to use your own TLS certificates and keys
defaultTLS:
## The base64-encoded TLS certificate for the default HTTPS server. By default, a pre-generated self-signed certificate is used.
## The base64-encoded TLS certificate for the default HTTPS server.
## Note: It is recommended that you specify your own certificate. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server.
cert: ""
## The base64-encoded TLS key for the default HTTPS server. By default, a pre-generated key is used.
## The base64-encoded TLS key for the default HTTPS server.
## Note: It is recommended that you specify your own key. Alternatively, omitting the default server secret completely will configure NGINX to reject TLS connections to the default server.
key: ""
@ -200,17 +221,22 @@ controller:
## The number of replicas of the Ingress Controller deployment.
replicaCount: 1
## A class of the Ingress Controller.
# Configures the ingress class the Ingress Controller uses.
ingressClass:
## A class of the Ingress Controller.
## IngressClass resource with the name equal to the class must be deployed. Otherwise,
## the Ingress Controller will fail to start.
## The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class.
## IngressClass resource with the name equal to the class must be deployed. Otherwise,
## the Ingress Controller will fail to start.
## The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class.
## The Ingress Controller processes all the resources that do not have the "ingressClassName" field for all versions of kubernetes.
ingressClass: nginx
## The Ingress Controller processes all the resources that do not have the "ingressClassName" field for all versions of kubernetes.
name: nginx
## New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`.
setAsDefaultIngress: false
## Creates a new IngressClass object with the name "controller.ingressClass.name". Set to false to use an existing IngressClass with the same name. If you use helm upgrade, do not change the values from the previous release as helm will delete IngressClass objects managed by helm. If you are upgrading from a release earlier than 3.3.0, do not set the value to false.
create: true
## New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`. Requires "controller.ingressClass.create".
setAsDefaultIngress: false
## Comma separated list of namespaces to watch for Ingress resources. By default the Ingress Controller watches all namespaces. Mutually exclusive with "controller.watchNamespaceLabel".
watchNamespace: ""
@ -236,6 +262,9 @@ controller:
## Enable TLS Passthrough on port 443. Requires controller.enableCustomResources.
enableTLSPassthrough: false
## Set the port for TLS Passthrough. Requires controller.enableCustomResources and controller.enableTLSPassthrough.
tlsPassthroughPort: 443
## Enable cert manager for Virtual Server resources. Requires controller.enableCustomResources.
enableCertManager: false
@ -247,14 +276,15 @@ controller:
create: false
## The spec of the GlobalConfiguration for defining the global configuration parameters of the Ingress Controller.
spec: {}
# listeners:
# - name: dns-udp
# port: 5353
# protocol: UDP
# - name: dns-tcp
# port: 5353
# protocol: TCP
spec: {} ## Ensure both curly brackets are removed when adding listeners in YAML format.
# listeners:
# - name: dns-udp
# port: 5353
# protocol: UDP
# - name: dns-tcp
# port: 5353
# protocol: TCP
## Enable custom NGINX configuration snippets in Ingress, VirtualServer, VirtualServerRoute and TransportServer resources.
enableSnippets: false
@ -295,6 +325,9 @@ controller:
## The static IP address for the load balancer. Requires controller.service.type set to LoadBalancer. The cloud provider must support this feature.
loadBalancerIP: ""
## The ClusterIP for the Ingress Controller service, autoassigned if not specified.
clusterIP: ""
## The list of external IPs for the Ingress Controller service.
externalIPs: []
@ -354,19 +387,6 @@ controller:
## Secret must exist in the same namespace as the helm release.
imagePullSecretName: ""
serviceMonitor:
## Creates a serviceMonitor to expose statistics on the kubernetes pods.
create: false
## Kubernetes object labels to attach to the serviceMonitor object.
labels: {}
## A set of labels to allow the selection of endpoints for the ServiceMonitor.
selectorMatchLabels: {}
## A list of endpoints allowed as part of this ServiceMonitor.
endpoints: []
reportIngressStatus:
## Updates the address field in the status of Ingress resources with an external address of the Ingress Controller.
## You must also specify the source of the external address either through an external service via controller.reportIngressStatus.externalService,
@ -441,6 +461,30 @@ prometheus:
## Configures the HTTP scheme used.
scheme: http
service:
## Creates a ClusterIP Service to expose Prometheus metrics internally
## Requires prometheus.create=true
create: false
labels:
service: "nginx-ingress-prometheus-service"
serviceMonitor:
## Creates a serviceMonitor to expose statistics on the kubernetes pods.
create: false
## Kubernetes object labels to attach to the serviceMonitor object.
labels: {}
## A set of labels to allow the selection of endpoints for the ServiceMonitor.
selectorMatchLabels:
service: "nginx-ingress-prometheus-service"
## A list of endpoints allowed as part of this ServiceMonitor.
## Matches on the name of a Service port.
endpoints:
- port: prometheus
serviceInsight:
## Expose NGINX Plus Service Insight endpoint.
create: false

View File

@ -4,9 +4,9 @@ annotations:
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: intel-device-plugins-operator
apiVersion: v2
appVersion: 0.27.1
appVersion: 0.28.0
description: A Helm chart for Intel Device Plugins Operator for Kubernetes
icon: https://avatars.githubusercontent.com/u/17888862?s=200&v=4
name: intel-device-plugins-operator
type: application
version: 0.27.1
version: 0.28.0

View File

@ -42,6 +42,12 @@ You may also run `helm show values` on this chart's dependencies for additional
|parameter| value |
|---------|-----------|
| `hub` | `intel` |
| `tag` | `` |
| `manager.image.hub` | `intel` |
| `manager.image.tag` | `` |
| `kubeRbacProxy.image.hub` | `gcr.io` |
| `kubeRbacProxy.image.tag` | `v0.14.1` |
| `kubeRbacProxy.image.pullPolicy` | `IfNotPresent` |
| `privateRegistry.registryUrl` | `` |
| `privateRegistry.registryUser` | `` |
| `privateRegistry.registrySecret` | `` |
| `pullPolicy` | `IfNotPresent` |

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.13.0
name: dlbdeviceplugins.deviceplugin.intel.com
spec:
group: deviceplugin.intel.com
@ -53,6 +52,10 @@ spec:
image:
description: Image is a container image with DLB device plugin executable.
type: string
initImage:
description: InitImage is a container image with a script that initializes
devices.
type: string
logLevel:
description: LogLevel sets the plugin's log level.
minimum: 0

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.13.0
name: dsadeviceplugins.deviceplugin.intel.com
spec:
group: deviceplugin.intel.com

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.13.0
name: fpgadeviceplugins.deviceplugin.intel.com
spec:
group: deviceplugin.intel.com

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.13.0
name: gpudeviceplugins.deviceplugin.intel.com
spec:
group: deviceplugin.intel.com
@ -52,7 +51,8 @@ spec:
properties:
enableMonitoring:
description: EnableMonitoring enables the monitoring resource ('i915_monitoring')
which gives access to all GPU devices on given node.
which gives access to all GPU devices on given node. Typically used
with Intel XPU-Manager.
type: boolean
image:
description: Image is a container image with GPU device plugin executable.
@ -74,7 +74,8 @@ spec:
preferredAllocationPolicy:
description: PreferredAllocationPolicy sets the mode of allocating
GPU devices on a node. See documentation for detailed description
of the policies. Only valid when SharedDevNum > 1 is set.
of the policies. Only valid when SharedDevNum > 1 is set. Not applicable
with ResourceManager.
enum:
- balanced
- packed
@ -82,7 +83,7 @@ spec:
type: string
resourceManager:
description: ResourceManager handles the fractional resource management
for multi-GPU nodes
for multi-GPU nodes. Enable only for clusters with GPU Aware Scheduling.
type: boolean
sharedDevNum:
description: SharedDevNum is a number of containers that can share

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.13.0
name: iaadeviceplugins.deviceplugin.intel.com
spec:
group: deviceplugin.intel.com

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.13.0
name: qatdeviceplugins.deviceplugin.intel.com
spec:
group: deviceplugin.intel.com

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.2
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.13.0
name: sgxdeviceplugins.deviceplugin.intel.com
spec:
group: deviceplugin.intel.com
@ -59,8 +58,9 @@ spec:
description: Image is a container image with SGX device plugin executable.
type: string
initImage:
description: InitImage is a container image with tools (e.g., SGX
NFD source hook) installed on each node.
description: InitImage is a container image with tools (i.e., SGX
NFD source hook) installed on each node. Recommendation is to leave
this unset and prefer the SGX NodeFeatureRule instead.
type: string
logLevel:
description: LogLevel sets the plugin's log level.

View File

@ -487,6 +487,7 @@ spec:
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
nodeSelector: {{- .Values.nodeSelector | toYaml | nindent 8 }}
serviceAccountName: default
terminationGracePeriodSeconds: 10
volumes:

View File

@ -1,3 +1,6 @@
nodeSelector:
kubernetes.io/arch: amd64
manager:
image:
hub: intel
@ -14,4 +17,4 @@ kubeRbacProxy:
privateRegistry:
registryUrl: ""
registryUser: ""
registrySecret: ""
registrySecret: ""

View File

@ -4,9 +4,9 @@ annotations:
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: intel-device-plugins-qat
apiVersion: v2
appVersion: 0.27.1
appVersion: 0.28.0
description: A Helm chart for Intel QAT Device Plugin
icon: https://avatars.githubusercontent.com/u/17888862?s=200&v=4
name: intel-device-plugins-qat
type: application
version: 0.27.1
version: 0.28.0

View File

@ -38,8 +38,10 @@ You may also run `helm show values` on this chart's dependencies for additional
|parameter| value |
|---------|-----------|
| `hub` | `intel` |
| `tag` | `` |
| `image.hub` | `intel` |
| `image.tag` | `` |
| `initImage.hub` | `intel` |
| `initImage.tag` | `` |
| `dpdkDriver` | `vfio-pci` |
| `kernelVfDrivers` | `c6xxvf`, `4xxxvf` |
| `maxNumDevices` | `128` |

View File

@ -4,9 +4,9 @@ annotations:
catalog.cattle.io/kube-version: '>=1.19-0'
catalog.cattle.io/release-name: intel-device-plugins-sgx
apiVersion: v2
appVersion: 0.27.1
appVersion: 0.28.0
description: A Helm chart for Intel SGX Device Plugin
icon: https://avatars.githubusercontent.com/u/17888862?s=200&v=4
name: intel-device-plugins-sgx
type: application
version: 0.27.1
version: 0.28.0

View File

@ -33,8 +33,8 @@ You may also run `helm show values` on this chart's dependencies for additional
|parameter| value |
|---------|-----------|
| `hub` | `intel` |
| `tag` | `` |
| `image.hub` | `intel` |
| `image.tag` | `` |
| `enclaveLimit` | `110` |
| `provisionLimit` | `110` |
| `logLevel` | `4` |

View File

@ -12,6 +12,11 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
The changelog until v1.5.7 was auto-generated based on git commits.
Those entries include a reference to the git commit to be able to get more details.
## 4.6.5
Update Jenkins image and appVersion to jenkins lts release version 2.414.2
## 4.6.4
Introducing TPL function on variables related to hostname in `./charts/jenkins/templates/jenkins-controller-ingress.yaml`

View File

@ -2,7 +2,7 @@ annotations:
artifacthub.io/category: integration-delivery
artifacthub.io/images: |
- name: jenkins
image: jenkins/jenkins:2.414.1-jdk11
image: jenkins/jenkins:2.414.2-jdk11
- name: k8s-sidecar
image: kiwigrid/k8s-sidecar:1.24.4
- name: inbound-agent
@ -22,7 +22,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.14-0'
catalog.cattle.io/release-name: jenkins
apiVersion: v2
appVersion: 2.414.1
appVersion: 2.414.2
description: Jenkins - Build great things at any scale! The leading open source automation
server, Jenkins provides over 1800 plugins to support building, deploying and automating
any project.
@ -49,4 +49,4 @@ sources:
- https://github.com/jenkinsci/docker-inbound-agent
- https://github.com/maorfr/kube-tasks
- https://github.com/jenkinsci/configuration-as-code-plugin
version: 4.6.4
version: 4.6.5

View File

@ -22,7 +22,7 @@ controller:
# Used for label app.kubernetes.io/component
componentName: "jenkins-controller"
image: "jenkins/jenkins"
# tag: "2.414.1-jdk11"
# tag: "2.414.2-jdk11"
tagLabel: jdk11
imagePullPolicy: "Always"
imagePullSecretName:

View File

@ -4,6 +4,17 @@
Nothing yet.
## 2.28.0
### Improvements
* Bump default `kong` image tag to 3.4.
[#883](https://github.com/Kong/charts/pull/883)
* Bump default ingress controller image tag to 2.12.
* Added validation rule for `latency` upstream load balancing algorithm to
CRDs. [Upgrade your CRDs](https://github.com/Kong/charts/blob/main/charts/kong/UPGRADE.md#updates-to-crds)
when installing this release.
## 2.27.0
### Improvements

View File

@ -3,7 +3,7 @@ annotations:
catalog.cattle.io/display-name: Kong Gateway
catalog.cattle.io/release-name: kong
apiVersion: v2
appVersion: "3.3"
appVersion: "3.4"
dependencies:
- condition: postgresql.enabled
name: postgresql
@ -20,4 +20,4 @@ maintainers:
name: kong
sources:
- https://github.com/Kong/charts/tree/main/charts/kong
version: 2.27.0
version: 2.28.0

View File

@ -600,7 +600,7 @@ directory.
| Parameter | Description | Default |
| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- |
| image.repository | Kong image | `kong` |
| image.tag | Kong image version | `2.5` |
| image.tag | Kong image version | `3.4` |
| image.pullPolicy | Image pull policy | `IfNotPresent` |
| image.pullSecrets | Image pull secrets | `null` |
| replicaCount | Kong instance count. It has no effect when `autoscaling.enabled` is set to true | `1` |
@ -724,7 +724,7 @@ section of `values.yaml` file:
|--------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------|
| enabled | Deploy the ingress controller, rbac and crd | true |
| image.repository | Docker image with the ingress controller | kong/kubernetes-ingress-controller |
| image.tag | Version of the ingress controller | 2.0 |
| image.tag | Version of the ingress controller | `2.12` |
| image.effectiveSemver | Version of the ingress controller used for version-specific features when image.tag is not a valid semantic version | |
| readinessProbe | Kong ingress controllers readiness probe | |
| livenessProbe | Kong ingress controllers liveness probe | |

View File

@ -2,7 +2,7 @@
# use single image strings instead of repository/tag
image:
unifiedRepoTag: kong:3.3
unifiedRepoTag: kong:3.4
env:
anonymous_reports: "off"
@ -10,4 +10,4 @@ ingressController:
env:
anonymous_reports: "false"
image:
unifiedRepoTag: kong/kubernetes-ingress-controller:2.11
unifiedRepoTag: kong/kubernetes-ingress-controller:2.12

View File

@ -1,9 +1,9 @@
# generated using: kubectl kustomize 'github.com/kong/kubernetes-ingress-controller/config/crd?ref=v2.11.0'
# generated using: kubectl kustomize 'github.com/kong/kubernetes-ingress-controller/config/crd?ref=v2.12.0'
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: ingressclassparameterses.configuration.konghq.com
spec:
group: configuration.konghq.com
@ -55,7 +55,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: kongclusterplugins.configuration.konghq.com
spec:
group: configuration.konghq.com
@ -306,7 +306,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: kongconsumergroups.configuration.konghq.com
spec:
group: configuration.konghq.com
@ -441,7 +441,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: kongconsumers.configuration.konghq.com
spec:
group: configuration.konghq.com
@ -599,7 +599,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: kongingresses.configuration.konghq.com
spec:
group: configuration.konghq.com
@ -778,6 +778,7 @@ spec:
- round-robin
- consistent-hashing
- least-connections
- latency
type: string
hash_fallback:
description: 'HashFallback defines What to use as hashing input if
@ -953,7 +954,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: kongplugins.configuration.konghq.com
spec:
group: configuration.konghq.com
@ -1199,7 +1200,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: tcpingresses.configuration.konghq.com
spec:
group: configuration.konghq.com
@ -1385,7 +1386,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.12.1
controller-gen.kubebuilder.io/version: v0.13.0
name: udpingresses.configuration.konghq.com
spec:
group: configuration.konghq.com

View File

@ -146,7 +146,7 @@ extraLabels:
konghq.com/component: quickstart
image:
repository: kong/kong-gateway
tag: "3.3"
tag: "3.4"
ingressController:
enabled: true
env:

View File

@ -12,7 +12,7 @@
image:
repository: kong/kong-gateway
tag: "3.3"
tag: "3.4"
env:
prefix: /kong_prefix/

View File

@ -9,7 +9,7 @@
image:
repository: kong/kong-gateway
tag: "3.3"
tag: "3.4"
admin:
enabled: true

View File

@ -2,7 +2,7 @@
image:
repository: kong
tag: "3.3"
tag: "3.4"
env:
prefix: /kong_prefix/

View File

@ -4,7 +4,7 @@
image:
repository: kong/kong-gateway
tag: "3.3"
tag: "3.4"
enterprise:
enabled: true

View File

@ -14,7 +14,7 @@
image:
repository: kong/kong-gateway
tag: "3.3"
tag: "3.4"
env:
database: postgres

View File

@ -12,7 +12,7 @@
image:
repository: kong/kong-gateway
tag: "3.3"
tag: "3.4"
env:
role: data_plane

View File

@ -6,7 +6,7 @@
image:
repository: kong
tag: "3.3"
tag: "3.4"
env:
prefix: /kong_prefix/

View File

@ -11,7 +11,7 @@
image:
repository: kong
tag: "3.3"
tag: "3.4"
env:
prefix: /kong_prefix/

View File

@ -6,7 +6,7 @@
image:
repository: kong
tag: "3.3"
tag: "3.4"
env:
prefix: /kong_prefix/

View File

@ -121,10 +121,10 @@ extraLabels: {}
# Specify Kong's Docker image and repository details here
image:
repository: kong
tag: "3.3"
tag: "3.4"
# Kong Enterprise
# repository: kong/kong-gateway
# tag: "3.3"
# tag: "3.4"
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
@ -514,7 +514,7 @@ ingressController:
enabled: true
image:
repository: kong/kubernetes-ingress-controller
tag: "2.11"
tag: "2.12"
# Optionally set a semantic version for version-gated features. This can normally
# be left unset. You only need to set this if your tag is not a semver string,
# such as when you are using a "next" tag. Set this to the effective semantic

View File

@ -5,7 +5,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.21.0-0'
catalog.cattle.io/release-name: linkerd-control-plane
apiVersion: v2
appVersion: stable-2.14.0
appVersion: stable-2.14.1
dependencies:
- name: partials
repository: file://./charts/partials
@ -25,4 +25,4 @@ name: linkerd-control-plane
sources:
- https://github.com/linkerd/linkerd2/
type: application
version: 1.15.0
version: 1.16.2

View File

@ -3,7 +3,7 @@
Linkerd gives you observability, reliability, and security
for your microservices — with no code change required.
![Version: 1.15.0](https://img.shields.io/badge/Version-1.15.0-informational?style=flat-square)
![Version: 1.16.2](https://img.shields.io/badge/Version-1.16.2-informational?style=flat-square)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![AppVersion: edge-XX.X.X](https://img.shields.io/badge/AppVersion-edge--XX.X.X-informational?style=flat-square)
@ -256,7 +256,7 @@ Kubernetes: `>=1.21.0-0`
| proxyInit.ignoreOutboundPorts | string | `"4567,4568"` | Default set of outbound ports to skip via iptables - Galera (4567,4568) |
| proxyInit.image.name | string | `"cr.l5d.io/linkerd/proxy-init"` | Docker image for the proxy-init container |
| proxyInit.image.pullPolicy | string | imagePullPolicy | Pull policy for the proxy-init container Docker image |
| proxyInit.image.version | string | `"v2.2.1"` | Tag for the proxy-init container Docker image |
| proxyInit.image.version | string | `"v2.2.3"` | Tag for the proxy-init container Docker image |
| proxyInit.iptablesMode | string | `"legacy"` | Variant of iptables that will be used to configure routing. Currently, proxy-init can be run either in 'nft' or in 'legacy' mode. The mode will control which utility binary will be called. The host must support whichever mode will be used |
| proxyInit.kubeAPIServerPorts | string | `"443,6443"` | Default set of ports to skip via iptables for control plane components so they can communicate with the Kubernetes API Server |
| proxyInit.logFormat | string | plain | Log format (`plain` or `json`) for the proxy-init |

View File

@ -171,7 +171,7 @@ webhooks:
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["policy.linkerd.io"]
apiVersions: ["v1alpha1", "v1beta1"]
apiVersions: ["*"]
resources:
- authorizationpolicies
- httproutes

View File

@ -153,9 +153,6 @@ spec:
template:
metadata:
annotations:
{{- if empty .Values.cliVersion }}
linkerd.io/helm-release-version: {{.Release.Revision | quote}}
{{- end }}
{{ include "partials.annotations.created-by" . }}
{{- include "partials.proxy.annotations" . | nindent 8}}
{{- with .Values.podAnnotations }}{{ toYaml . | trim | nindent 8 }}{{- end }}

View File

@ -32,9 +32,6 @@ spec:
template:
metadata:
annotations:
{{- if empty .Values.cliVersion }}
linkerd.io/helm-release-version: {{.Release.Revision | quote}}
{{- end }}
{{ include "partials.annotations.created-by" . }}
{{- include "partials.proxy.annotations" . | nindent 8}}
{{- with .Values.podAnnotations }}{{ toYaml . | trim | nindent 8 }}{{- end }}

View File

@ -53,5 +53,8 @@ webhookFailurePolicy: Fail
# service profile validator configuration
spValidatorResources: *controller_resources
# policy controller configuration
policyControllerResources: *controller_resources
# flag for linkerd check
highAvailability: true

View File

@ -22,7 +22,7 @@ controlPlaneTracing: false
# -- namespace to send control plane traces to
controlPlaneTracingNamespace: linkerd-jaeger
# -- control plane version. See Proxy section for proxy version
linkerdVersion: stable-2.14.0
linkerdVersion: stable-2.14.1
# -- default kubernetes deployment strategy
deploymentStrategy:
rollingUpdate:
@ -217,7 +217,7 @@ proxyInit:
# @default -- imagePullPolicy
pullPolicy: ""
# -- Tag for the proxy-init container Docker image
version: v2.2.1
version: v2.2.3
resources:
cpu:
# -- Maximum amount of CPU units that the proxy-init container can use
@ -502,6 +502,9 @@ policyValidator:
# for more information.
injectCaFromSecret: ""
# -|- CPU, Memory and Ephemeral Storage resources required by the policy controller
#policyControllerResources:
# -- NodeSelector section, See the [K8S
# documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector)
# for more information

View File

@ -1 +0,0 @@
tests

View File

@ -3,20 +3,18 @@ annotations:
catalog.cattle.io/display-name: YugabyteDB
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: yugabyte
charts.openshift.io/name: yugabyte
apiVersion: v2
appVersion: 2.18.3.0-b75
apiVersion: v1
appVersion: 2.14.13.0-b13
description: YugabyteDB is the high-performance distributed SQL database for building
global, internet-scale apps.
home: https://www.yugabyte.com
icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
name: Sanketh Indarapu
- email: gjalla@yugabyte.com
name: Govardhan Reddy Jalla
- email: ram@yugabyte.com
name: Ram Sri
- email: arnav@yugabyte.com
name: Arnav Agarwal
name: yugabyte
sources:
- https://github.com/yugabyte/yugabyte-db
version: 2.18.3+0
version: 2.14.13

View File

@ -1 +1 @@
This chart bootstraps an RF3 YugabyteDB version 2.18.3.0-b75 cluster using the Helm Package Manager.
This chart bootstraps an RF3 Yugabyte DB version 2.14.13.0-b13 cluster using the Helm Package Manager.

View File

@ -11,209 +11,84 @@ from sys import exit
import json
import base64
import tempfile
import time
import os.path
def run_command(command_args, namespace=None, as_json=True, log_command=True):
command = ["kubectl"]
def run_command(command_args, namespace=None, as_json=True):
command = ['kubectl']
if namespace:
command.extend(["--namespace", namespace])
command.extend(['--namespace', namespace])
command.extend(command_args)
if as_json:
command.extend(["-o", "json"])
if log_command:
print("Running command: {}".format(" ".join(command)))
output = check_output(command)
if as_json:
return json.loads(output)
command.extend(['-o', 'json'])
return json.loads(check_output(command))
else:
return output.decode("utf8")
return check_output(command).decode('utf8')
def create_sa_token_secret(directory, sa_name, namespace):
"""Creates a service account token secret for sa_name in
namespace. Returns the name of the secret created.
Ref:
https://k8s.io/docs/concepts/configuration/secret/#service-account-token-secrets
"""
token_secret = {
"apiVersion": "v1",
"data": {
"do-not-delete-used-for-yugabyte-anywhere": "MQ==",
},
"kind": "Secret",
"metadata": {
"annotations": {
"kubernetes.io/service-account.name": sa_name,
},
"name": sa_name,
},
"type": "kubernetes.io/service-account-token",
}
token_secret_file_name = os.path.join(directory, "token_secret.yaml")
with open(token_secret_file_name, "w") as token_secret_file:
json.dump(token_secret, token_secret_file)
run_command(["apply", "-f", token_secret_file_name], namespace)
return sa_name
def get_secret_data(secret, namespace):
"""Returns the secret in JSON format if it has ca.crt and token in
it, else returns None. It retries 3 times with 1 second timeout
for the secret to be populated with this data.
"""
secret_data = None
num_retries = 5
timeout = 2
while True:
secret_json = run_command(["get", "secret", secret], namespace)
if "ca.crt" in secret_json["data"] and "token" in secret_json["data"]:
secret_data = secret_json
break
num_retries -= 1
if num_retries == 0:
break
print(
"Secret '{}' is not populated. Sleep {}s, ({} retries left)".format(
secret, timeout, num_retries
)
)
time.sleep(timeout)
return secret_data
def get_secrets_for_sa(sa_name, namespace):
"""Returns a list of all service account token secrets associated
with the given sa_name in the namespace.
"""
secrets = run_command(
[
"get",
"secret",
"--field-selector",
"type=kubernetes.io/service-account-token",
"-o",
'jsonpath="{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name == "'
+ sa_name
+ '")].metadata.name}"',
],
as_json=False,
)
return secrets.strip('"').split()
parser = argparse.ArgumentParser(description="Generate KubeConfig with Token")
parser.add_argument("-s", "--service_account", help="Service Account name", required=True)
parser.add_argument("-n", "--namespace", help="Kubernetes namespace", default="kube-system")
parser.add_argument("-c", "--context", help="kubectl context")
parser.add_argument("-o", "--output_file", help="output file path")
parser = argparse.ArgumentParser(description='Generate KubeConfig with Token')
parser.add_argument('-s', '--service_account', help='Service Account name', required=True)
parser.add_argument('-n', '--namespace', help='Kubernetes namespace', default='kube-system')
parser.add_argument('-c', '--context', help='kubectl context')
args = vars(parser.parse_args())
# if the context is not provided we use the current-context
context = args["context"]
context = args['context']
if context is None:
context = run_command(["config", "current-context"], args["namespace"], as_json=False)
context = run_command(['config', 'current-context'],
args['namespace'], as_json=False)
cluster_attrs = run_command(
["config", "get-contexts", context.strip(), "--no-headers"], args["namespace"], as_json=False
)
cluster_attrs = run_command(['config', 'get-contexts', context.strip(),
'--no-headers'], args['namespace'], as_json=False)
cluster_name = cluster_attrs.strip().split()[2]
endpoint = run_command(
[
"config",
"view",
"-o",
'jsonpath="{.clusters[?(@.name =="' + cluster_name + '")].cluster.server}"',
],
args["namespace"],
as_json=False,
)
service_account_info = run_command(["get", "sa", args["service_account"]], args["namespace"])
tmpdir = tempfile.TemporaryDirectory()
# Get the token and ca.crt from service account secret.
sa_secrets = list()
# Get secrets specified in the service account, there can be multiple
# of them, and not all are service account token secrets.
if "secrets" in service_account_info:
sa_secrets = [secret["name"] for secret in service_account_info["secrets"]]
# Find the existing additional service account token secrets
sa_secrets.extend(get_secrets_for_sa(args["service_account"], args["namespace"]))
endpoint = run_command(['config', 'view', '-o',
'jsonpath="{.clusters[?(@.name =="' +
cluster_name + '")].cluster.server}"'],
args['namespace'], as_json=False)
service_account_info = run_command(['get', 'sa', args['service_account']],
args['namespace'])
# some ServiceAccounts have multiple secrets, and not all them have a
# ca.crt and a token.
sa_secrets = [secret['name'] for secret in service_account_info['secrets']]
secret_data = None
for secret in sa_secrets:
secret_data = get_secret_data(secret, args["namespace"])
if secret_data is not None:
break
# Kubernetes 1.22+ doesn't create the service account token secret by
# default, we have to create one.
secret_json = run_command(['get', 'secret', secret], args['namespace'])
if 'ca.crt' not in secret_json['data'] and 'token' not in secret_json['data']:
continue
secret_data = secret_json
if secret_data is None:
print("No usable secret found for '{}', creating one.".format(args["service_account"]))
token_secret = create_sa_token_secret(tmpdir.name, args["service_account"], args["namespace"])
secret_data = get_secret_data(token_secret, args["namespace"])
if secret_data is None:
exit(
"Failed to generate kubeconfig: No usable credentials found for '{}'.".format(
args["service_account"]
)
)
exit("No usable secret found for '{}'.".format(args['service_account']))
context_name = '{}-{}'.format(args['service_account'], cluster_name)
kube_config = '/tmp/{}.conf'.format(args['service_account'])
context_name = "{}-{}".format(args["service_account"], cluster_name)
kube_config = args["output_file"]
if not kube_config:
kube_config = "/tmp/{}.conf".format(args["service_account"])
with tempfile.NamedTemporaryFile() as ca_crt_file:
ca_crt = base64.b64decode(secret_data['data']['ca.crt'])
ca_crt_file.write(ca_crt)
ca_crt_file.flush()
# create kubeconfig entry
set_cluster_cmd = ['config', 'set-cluster', cluster_name,
'--kubeconfig={}'.format(kube_config),
'--server={}'.format(endpoint.strip('"')),
'--embed-certs=true',
'--certificate-authority={}'.format(ca_crt_file.name)]
run_command(set_cluster_cmd, as_json=False)
user_token = base64.b64decode(secret_data['data']['token']).decode('utf-8')
set_credentials_cmd = ['config', 'set-credentials', context_name,
'--token={}'.format(user_token),
'--kubeconfig={}'.format(kube_config)]
run_command(set_credentials_cmd, as_json=False)
ca_crt_file_name = os.path.join(tmpdir.name, "ca.crt")
ca_crt_file = open(ca_crt_file_name, "wb")
ca_crt_file.write(base64.b64decode(secret_data["data"]["ca.crt"]))
ca_crt_file.close()
# create kubeconfig entry
set_cluster_cmd = [
"config",
"set-cluster",
cluster_name,
"--kubeconfig={}".format(kube_config),
"--server={}".format(endpoint.strip('"')),
"--embed-certs=true",
"--certificate-authority={}".format(ca_crt_file_name),
]
run_command(set_cluster_cmd, as_json=False)
user_token = base64.b64decode(secret_data["data"]["token"]).decode("utf-8")
set_credentials_cmd = [
"config",
"set-credentials",
context_name,
"--token={}".format(user_token),
"--kubeconfig={}".format(kube_config),
]
run_command(set_credentials_cmd, as_json=False, log_command=False)
set_context_cmd = [
"config",
"set-context",
context_name,
"--cluster={}".format(cluster_name),
"--user={}".format(context_name),
"--kubeconfig={}".format(kube_config),
]
set_context_cmd = ['config', 'set-context', context_name,
'--cluster={}'.format(cluster_name),
'--user={}'.format(context_name),
'--kubeconfig={}'.format(kube_config)]
run_command(set_context_cmd, as_json=False)
use_context_cmd = ["config", "use-context", context_name, "--kubeconfig={}".format(kube_config)]
use_context_cmd = ['config', 'use-context', context_name,
'--kubeconfig={}'.format(kube_config)]
run_command(use_context_cmd, as_json=False)
print("Generated the kubeconfig file: {}".format(kube_config))

View File

@ -1,4 +0,0 @@
# OCP compatible values for yugabyte
Image:
repository: "quay.io/yugabyte/yugabyte-ubi"

View File

@ -16,7 +16,7 @@ questions:
label: YugabyteDB image repository
description: "YugabyteDB image repository"
- variable: Image.tag
default: "2.5.1.0-b153"
default: "2.14.1.0-b36"
required: true
type: string
label: YugabyteDB image tag

View File

@ -26,7 +26,7 @@ Generate common labels.
{{- define "yugabyte.labels" }}
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
release: {{ .Release.Name | quote }}
chart: {{ .Chart.Name | quote }}
chart: {{ .Values.oldNamingStyle | ternary .Chart.Name (include "yugabyte.chart" .) | quote }}
component: {{ .Values.Component | quote }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
@ -56,89 +56,6 @@ release: {{ .root.Release.Name | quote }}
{{- end }}
{{- end }}
{{/*
Create secrets in DBNamespace from other namespaces by iterating over envSecrets.
*/}}
{{- define "yugabyte.envsecrets" -}}
{{- range $v := .secretenv }}
{{- if $v.valueFrom.secretKeyRef.namespace }}
{{- $secretObj := (lookup
"v1"
"Secret"
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name)
| default dict }}
{{- $secretData := (get $secretObj "data") | default dict }}
{{- $secretValue := (get $secretData $v.valueFrom.secretKeyRef.key) | default "" }}
{{- if (and (not $secretValue) (not $v.valueFrom.secretKeyRef.optional)) }}
{{- required (printf "Secret or key missing for %s/%s in namespace: %s"
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
$v.valueFrom.secretKeyRef.namespace)
nil }}
{{- end }}
{{- if $secretValue }}
apiVersion: v1
kind: Secret
metadata:
{{- $secretfullname := printf "%s-%s-%s-%s"
$.root.Release.Name
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
}}
name: {{ printf "%s-%s-%s-%s-%s-%s"
$.root.Release.Name
($v.valueFrom.secretKeyRef.namespace | substr 0 5)
($v.valueFrom.secretKeyRef.name | substr 0 5)
( $v.valueFrom.secretKeyRef.key | substr 0 5)
(sha256sum $secretfullname | substr 0 4)
($.suffix)
| lower | replace "." "" | replace "_" ""
}}
namespace: "{{ $.root.Release.Namespace }}"
labels:
{{- include "yugabyte.labels" $.root | indent 4 }}
type: Opaque # should it be an Opaque secret?
data:
{{ $v.valueFrom.secretKeyRef.key }}: {{ $secretValue | quote }}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}
{{/*
Add env secrets to DB statefulset.
*/}}
{{- define "yugabyte.addenvsecrets" -}}
{{- range $v := .secretenv }}
- name: {{ $v.name }}
valueFrom:
secretKeyRef:
{{- if $v.valueFrom.secretKeyRef.namespace }}
{{- $secretfullname := printf "%s-%s-%s-%s"
$.root.Release.Name
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
}}
name: {{ printf "%s-%s-%s-%s-%s-%s"
$.root.Release.Name
($v.valueFrom.secretKeyRef.namespace | substr 0 5)
($v.valueFrom.secretKeyRef.name | substr 0 5)
($v.valueFrom.secretKeyRef.key | substr 0 5)
(sha256sum $secretfullname | substr 0 4)
($.suffix)
| lower | replace "." "" | replace "_" ""
}}
{{- else }}
name: {{ $v.valueFrom.secretKeyRef.name }}
{{- end }}
key: {{ $v.valueFrom.secretKeyRef.key }}
optional: {{ $v.valueFrom.secretKeyRef.optional | default "false" }}
{{- end }}
{{- end }}
{{/*
Create Volume name.
*/}}
@ -167,21 +84,18 @@ Generate a preflight check script invocation.
*/}}
{{- define "yugabyte.preflight_check" -}}
{{- if not .Values.preflight.skipAll -}}
{{- $port := .Preflight.Port -}}
{{- range $addr := split "," .Preflight.Addr -}}
if [ -f /home/yugabyte/tools/k8s_preflight.py ]; then
PYTHONUNBUFFERED="true" /home/yugabyte/tools/k8s_preflight.py \
dnscheck \
--addr="{{ $addr }}" \
{{- if not $.Values.preflight.skipBind }}
--port="{{ $port }}"
--addr="{{ .Preflight.Addr }}" \
{{- if not .Values.preflight.skipBind }}
--port="{{ .Preflight.Port }}"
{{- else }}
--skip_bind
{{- end }}
fi && \
{{ end }}
{{- end }}
{{- end }}
{{- end -}}
{{- end -}}
{{/*
Get YugaByte fs data directories.
@ -202,24 +116,26 @@ Get files from fs data directories for readiness / liveness probes.
{{- end -}}
{{/*
Command to do a disk write and sync for liveness probes.
*/}}
{{- define "yugabyte.fs_data_dirs_probe" -}}
echo "disk check at: $(date)" \
| tee {{ template "yugabyte.fs_data_dirs_probe_files" . }} \
&& sync {{ template "yugabyte.fs_data_dirs_probe_files" . }}
{{- end -}}
{{/*
Generate server FQDN.
*/}}
{{- define "yugabyte.server_fqdn" -}}
{{- if .Values.multicluster.createServicePerPod -}}
{{- if (and .Values.istioCompatibility.enabled .Values.multicluster.createServicePerPod) -}}
{{- printf "$(HOSTNAME).$(NAMESPACE).svc.%s" .Values.domainName -}}
{{- else if (and .Values.oldNamingStyle .Values.multicluster.createServiceExports) -}}
{{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }}
{{- printf "$(HOSTNAME).%s.%s.$(NAMESPACE).svc.clusterset.local" $membershipName .Service.name -}}
{{- else if .Values.oldNamingStyle -}}
{{- printf "$(HOSTNAME).%s.$(NAMESPACE).svc.%s" .Service.name .Values.domainName -}}
{{- else -}}
{{- if .Values.multicluster.createServiceExports -}}
{{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }}
{{- printf "$(HOSTNAME).%s.%s-%s.$(NAMESPACE).svc.clusterset.local" $membershipName (include "yugabyte.fullname" .) .Service.name -}}
{{- else -}}
{{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}}
{{- end -}}
{{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}}
{{- end -}}
{{- end -}}
@ -232,25 +148,10 @@ Generate server broadcast address.
{{/*
Generate server RPC bind address.
In case of multi-cluster services (MCS), we set it to $(POD_IP) to
ensure YCQL uses a resolvable address.
See https://github.com/yugabyte/yugabyte-db/issues/16155
We use a workaround for above in case of Istio by setting it to
$(POD_IP) and localhost. Master doesn't support that combination, so
we stick to 0.0.0.0, which works for master.
*/}}
{{- define "yugabyte.rpc_bind_address" -}}
{{- $port := index .Service.ports "tcp-rpc-port" -}}
{{- if .Values.istioCompatibility.enabled -}}
{{- if (eq .Service.name "yb-masters") -}}
0.0.0.0:{{ $port }}
{{- else -}}
$(POD_IP):{{ $port }},127.0.0.1:{{ $port }}
{{- end -}}
{{- else if (or .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod) -}}
$(POD_IP):{{ $port }}
0.0.0.0:{{ index .Service.ports "tcp-rpc-port" -}}
{{- else -}}
{{- include "yugabyte.server_fqdn" . -}}
{{- end -}}
@ -267,7 +168,7 @@ Generate server web interface.
Generate server CQL proxy bind address.
*/}}
{{- define "yugabyte.cql_proxy_bind_address" -}}
{{- if or .Values.istioCompatibility.enabled .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod -}}
{{- if .Values.istioCompatibility.enabled -}}
0.0.0.0:{{ index .Service.ports "tcp-yql-port" -}}
{{- else -}}
{{- include "yugabyte.server_fqdn" . -}}
@ -312,10 +213,10 @@ Compute the maximum number of unavailable pods based on the number of master rep
Set consistent issuer name.
*/}}
{{- define "yugabyte.tls_cm_issuer" -}}
{{- if .Values.tls.certManager.bootstrapSelfsigned -}}
{{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }}
{{- if .Values.tls.certManager.useClusterIssuer -}}
{{ .Values.tls.certManager.clusterIssuer }}
{{- else -}}
{{ .Values.tls.certManager.useClusterIssuer | ternary .Values.tls.certManager.clusterIssuer .Values.tls.certManager.issuer}}
{{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }}
{{- end -}}
{{- end -}}
@ -355,51 +256,3 @@ Set consistent issuer name.
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Default nodeAffinity for multi-az deployments
*/}}
{{- define "yugabyte.multiAZNodeAffinity" -}}
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
- {{ .Values.AZ }}
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- {{ .Values.AZ }}
{{- end -}}
{{/*
Default podAntiAffinity for master and tserver
This requires "appLabelArgs" to be passed in - defined in service.yaml
we have a .root and a .label in appLabelArgs
*/}}
{{- define "yugabyte.podAntiAffinity" -}}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
{{- if .root.Values.oldNamingStyle }}
- key: app
operator: In
values:
- "{{ .label }}"
{{- else }}
- key: app.kubernetes.io/name
operator: In
values:
- "{{ .label }}"
- key: release
operator: In
values:
- {{ .root.Release.Name | quote }}
{{- end }}
topologyKey: kubernetes.io/hostname
{{- end -}}

View File

@ -1,7 +1,7 @@
{{- $root := . -}}
---
{{- if $root.Values.tls.certManager.enabled }}
{{- if $root.Values.tls.certManager.bootstrapSelfsigned }}
{{- if not $root.Values.tls.certManager.useClusterIssuer }}
---
apiVersion: cert-manager.io/v1
kind: Issuer
@ -37,38 +37,13 @@ spec:
ca:
secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }}
---
{{- else }}
{{/* when bootstrapSelfsigned = false, ie. when using an external CA.
Create a Secret with just the rootCA.cert value and mount into master/tserver pods.
This will be used as a fall back in case the Secret generated by cert-manager does not
have a root ca.crt. This can happen for certain certificate issuers like LetsEncrypt.
*/}}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }}
namespace: "{{ $root.Release.Namespace }}"
labels:
{{- include "yugabyte.labels" $root | indent 4 }}
type: Opaque
data:
ca.crt: {{ $root.Values.tls.rootCA.cert }}
---
{{- end }}
{{/*
The below Certificate resource will trigger cert-manager to issue crt/key into Secrets.
These secrets are mounted into master/tserver pods.
*/}}
{{- range .Values.Services }}
{{- $service := . -}}
{{- $appLabelArgs := dict "label" .label "root" $root -}}
{{- $serviceValues := (dict "Service" $service "Values" $root.Values "Chart" $root.Chart "Release" $root.Release) -}}
{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}}
{{- if (gt (int $replicas) 0) }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
@ -90,29 +65,28 @@ spec:
secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }}
duration: {{ $root.Values.tls.certManager.certificates.duration | quote }}
renewBefore: {{ $root.Values.tls.certManager.certificates.renewBefore | quote }}
commonName: yugabyte-{{ .name }}
isCA: false
privateKey:
algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }}
encoding: PKCS8
size: {{ $root.Values.tls.certManager.certificates.keySize }}
rotationPolicy: Always
usages:
- server auth
- client auth
# At least one of a DNS Name, URI, or IP address is required.
dnsNames:
{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}}
{{- range $index := until ( int ( $replicas ) ) }}
{{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }}
- {{$node}}
{{- end }}
- {{ printf "%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
uris: []
ipAddresses: []
---
{{- end }}
{{- end }}
---
apiVersion: cert-manager.io/v1
@ -140,7 +114,6 @@ spec:
algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }}
encoding: PKCS8
size: {{ $root.Values.tls.certManager.certificates.keySize }}
rotationPolicy: Always
usages:
- client auth
dnsNames: []

View File

@ -1,23 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "yugabyte.fullname" . }}-master-hooks
namespace: "{{ .Release.Namespace }}"
data:
{{- range $index := until ( int ( .Values.replicas.master ) ) }}
yb-master-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' "
yb-master-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' "
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "yugabyte.fullname" . }}-tserver-hooks
namespace: "{{ .Release.Namespace }}"
data:
{{- range $index := until ( int ( .Values.replicas.tserver) ) }}
yb-tserver-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' "
yb-tserver-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' "
{{- end }}
---

View File

@ -11,19 +11,11 @@ metadata:
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
{{- include "yugabyte.labels" $ | indent 4 }}
service-type: "non-endpoint"
spec:
ports:
{{- range $label, $port := $server.ports }}
{{- if (eq $label "grpc-ybc-port") }}
{{- if $.Values.ybc.enabled }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- else }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- end}}
selector:
statefulset.kubernetes.io/pod-name: {{ $podName | quote }}

View File

@ -1,21 +0,0 @@
{{- /*
Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export
https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api#exporting-services
*/}}
{{- if .Values.multicluster.createServiceExports }}
apiVersion: {{ .Values.multicluster.mcsApiVersion }}
kind: ServiceExport
metadata:
name: {{ .Values.oldNamingStyle | ternary "yb-masters" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-masters") | quote }}
namespace: "{{ .Release.Namespace }}"
labels:
{{- include "yugabyte.labels" . | indent 4 }}
---
apiVersion: {{ .Values.multicluster.mcsApiVersion }}
kind: ServiceExport
metadata:
name: {{ .Values.oldNamingStyle | ternary "yb-tservers" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-tservers") | quote }}
namespace: "{{ .Release.Namespace }}"
labels:
{{- include "yugabyte.labels" . | indent 4 }}
{{ end -}}

Some files were not shown because too many files have changed in this diff Show More