diff --git a/assets/bitnami/kafka-25.1.4.tgz b/assets/bitnami/kafka-25.1.4.tgz new file mode 100644 index 000000000..40bc3920d Binary files /dev/null and b/assets/bitnami/kafka-25.1.4.tgz differ diff --git a/assets/bitnami/wordpress-17.1.4.tgz b/assets/bitnami/wordpress-17.1.4.tgz new file mode 100644 index 000000000..0cfbaa04c Binary files /dev/null and b/assets/bitnami/wordpress-17.1.4.tgz differ diff --git a/assets/fairwinds/polaris-5.14.0.tgz b/assets/fairwinds/polaris-5.14.0.tgz new file mode 100644 index 000000000..61f2db583 Binary files /dev/null and b/assets/fairwinds/polaris-5.14.0.tgz differ diff --git a/assets/harbor/harbor-1.13.0.tgz b/assets/harbor/harbor-1.13.0.tgz new file mode 100644 index 000000000..64a23bfe3 Binary files /dev/null and b/assets/harbor/harbor-1.13.0.tgz differ diff --git a/assets/koor-tech/koor-operator-0.3.6.tgz b/assets/koor-tech/koor-operator-0.3.6.tgz new file mode 100644 index 000000000..995a57148 Binary files /dev/null and b/assets/koor-tech/koor-operator-0.3.6.tgz differ diff --git a/assets/kuma/kuma-2.4.0.tgz b/assets/kuma/kuma-2.4.0.tgz new file mode 100644 index 000000000..9ea30b18e Binary files /dev/null and b/assets/kuma/kuma-2.4.0.tgz differ diff --git a/assets/redpanda/redpanda-5.2.0.tgz b/assets/redpanda/redpanda-5.2.0.tgz new file mode 100644 index 000000000..a4679303e Binary files /dev/null and b/assets/redpanda/redpanda-5.2.0.tgz differ diff --git a/assets/sysdig/sysdig-1.16.9.tgz b/assets/sysdig/sysdig-1.16.9.tgz new file mode 100644 index 000000000..37b834f62 Binary files /dev/null and b/assets/sysdig/sysdig-1.16.9.tgz differ diff --git a/assets/yugabyte/yugabyte-2.14.12.tgz b/assets/yugabyte/yugabyte-2.14.12.tgz new file mode 100644 index 000000000..c0c8ee217 Binary files /dev/null and b/assets/yugabyte/yugabyte-2.14.12.tgz differ diff --git a/assets/yugabyte/yugaware-2.14.12.tgz b/assets/yugabyte/yugaware-2.14.12.tgz new file mode 100644 index 000000000..f0192ab30 Binary files /dev/null and b/assets/yugabyte/yugaware-2.14.12.tgz differ diff --git a/charts/bitnami/kafka/Chart.lock b/charts/bitnami/kafka/Chart.lock index b3009cd7b..86957775a 100644 --- a/charts/bitnami/kafka/Chart.lock +++ b/charts/bitnami/kafka/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: zookeeper repository: oci://registry-1.docker.io/bitnamicharts - version: 12.1.0 + version: 12.1.1 - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.9.0 -digest: sha256:a54db8d2946ff889eaa08317cdc9eccbfe55722b08c147ee0799925cd1b43c93 -generated: "2023-08-23T10:11:09.64327+02:00" + version: 2.9.1 +digest: sha256:d80576ab604d6ae40689f985ffff711a95525fd2e04df86f7524300fb5c7b6de +generated: "2023-08-30T12:23:35.3141937Z" diff --git a/charts/bitnami/kafka/Chart.yaml b/charts/bitnami/kafka/Chart.yaml index a51e8b4dc..de988de9c 100644 --- a/charts/bitnami/kafka/Chart.yaml +++ b/charts/bitnami/kafka/Chart.yaml @@ -6,15 +6,15 @@ annotations: category: Infrastructure images: | - name: jmx-exporter - image: docker.io/bitnami/jmx-exporter:0.19.0-debian-11-r49 + image: docker.io/bitnami/jmx-exporter:0.19.0-debian-11-r57 - name: kafka-exporter - image: docker.io/bitnami/kafka-exporter:1.7.0-debian-11-r85 + image: docker.io/bitnami/kafka-exporter:1.7.0-debian-11-r93 - name: kafka - image: docker.io/bitnami/kafka:3.5.1-debian-11-r25 + image: docker.io/bitnami/kafka:3.5.1-debian-11-r35 - name: kubectl - image: docker.io/bitnami/kubectl:1.25.12-debian-11-r29 + image: docker.io/bitnami/kubectl:1.25.13-debian-11-r5 - name: os-shell - image: 
docker.io/bitnami/os-shell:11-debian-11-r43 + image: docker.io/bitnami/os-shell:11-debian-11-r51 licenses: Apache-2.0 apiVersion: v2 appVersion: 3.5.1 @@ -45,4 +45,4 @@ maintainers: name: kafka sources: - https://github.com/bitnami/charts/tree/main/bitnami/kafka -version: 25.1.2 +version: 25.1.4 diff --git a/charts/bitnami/kafka/README.md b/charts/bitnami/kafka/README.md index 705987fd8..e3a36a9f0 100644 --- a/charts/bitnami/kafka/README.md +++ b/charts/bitnami/kafka/README.md @@ -82,7 +82,7 @@ The command removes all the Kubernetes components associated with the chart and | ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | | `image.registry` | Kafka image registry | `docker.io` | | `image.repository` | Kafka image repository | `bitnami/kafka` | -| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.5.1-debian-11-r25` | +| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.5.1-debian-11-r35` | | `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | | `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | @@ -365,73 +365,73 @@ The command removes all the Kubernetes components associated with the chart and ### Traffic Exposure parameters -| Name | Description | Value | -| ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | -| `service.type` | Kubernetes Service type | `ClusterIP` | -| `service.ports.client` | Kafka svc port for client connections | `9092` | -| `service.ports.controller` | Kafka svc port for controller connections. It is used if "kraft.enabled: true" | `9093` | -| `service.ports.interbroker` | Kafka svc port for inter-broker connections | `9094` | -| `service.ports.external` | Kafka svc port for external connections | `9095` | -| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | -| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | -| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | -| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `service.clusterIP` | Kafka service Cluster IP | `""` | -| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | -| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | -| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | -| `service.annotations` | Additional custom annotations for Kafka service | `{}` | -| `service.headless.controller.annotations` | Annotations for the controller-eligible headless service. | `{}` | -| `service.headless.controller.labels` | Labels for the controller-eligible headless service. | `{}` | -| `service.headless.broker.annotations` | Annotations for the broker-only headless service. | `{}` | -| `service.headless.broker.labels` | Labels for the broker-only headless service. 
| `{}` | -| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | -| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | -| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | -| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | -| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.12-debian-11-r29` | -| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | -| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | -| `externalAccess.autoDiscovery.resources.limits` | The resources limits for the auto-discovery init container | `{}` | -| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container | `{}` | -| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes | `false` | -| `externalAccess.controller.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | -| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | -| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | -| `externalAccess.controller.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | -| `externalAccess.controller.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | -| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | -| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | -| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | -| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | -| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | -| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | -| `externalAccess.broker.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | -| `externalAccess.broker.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | -| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | -| `externalAccess.broker.service.labels` | Service labels for external access | `{}` | -| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | -| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | -| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | -| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | +| Name | Description | Value | +| ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.controller` | Kafka svc port for controller connections. It is used if "kraft.enabled: true" | `9093` | +| `service.ports.interbroker` | Kafka svc port for inter-broker connections | `9094` | +| `service.ports.external` | Kafka svc port for external connections | `9095` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.headless.controller.annotations` | Annotations for the controller-eligible headless service. | `{}` | +| `service.headless.controller.labels` | Labels for the controller-eligible headless service. | `{}` | +| `service.headless.broker.annotations` | Annotations for the broker-only headless service. | `{}` | +| `service.headless.broker.labels` | Labels for the broker-only headless service. 
| `{}` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.13-debian-11-r5` | +| `externalAccess.autoDiscovery.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | The resources limits for the auto-discovery init container | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container | `{}` | +| `externalAccess.controller.forceExpose` | If set to true, force exposing controller-eligible nodes although they are configured as controller-only nodes | `false` | +| `externalAccess.controller.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.controller.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.controller.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.controller.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.controller.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.controller.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.controller.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.controller.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.controller.service.labels` | Service labels for external access | `{}` | +| `externalAccess.controller.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.controller.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `externalAccess.broker.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.broker.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.broker.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.broker.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.externalIPs` | Use distinct service host IPs to configure Kafka external listener when service type is NodePort. Length must be the same as replicaCount | `[]` | +| `externalAccess.broker.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.broker.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.broker.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.broker.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.broker.service.labels` | Service labels for external access | `{}` | +| `externalAccess.broker.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.broker.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | ### Volume Permissions parameters @@ -440,7 +440,7 @@ The command removes all the Kubernetes components associated with the chart and | `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | | `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | | `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r43` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r51` | | `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | | `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | @@ -465,7 +465,7 @@ The command removes all the Kubernetes components associated with the chart and | `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | | `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | | `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` | -| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.7.0-debian-11-r85` | +| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.7.0-debian-11-r93` | | `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | | `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | @@ -519,7 +519,7 @@ The command removes all the Kubernetes components associated with the chart and | `metrics.jmx.kafkaJmxPort` | JMX port where the exporter will collect metrics, exposed in the Kafka container. 
| `5555` | | `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | | `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | -| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.19.0-debian-11-r49` | +| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.19.0-debian-11-r57` | | `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | | `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | @@ -1087,6 +1087,10 @@ This guide is an adaptation from upstream documentation: [Migrate from ZooKeeper ## Upgrading +### To 25.0.0 + +This major updates the Zookeeper subchart to its newest major, 12.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1200). + ### To 24.0.0 This major version is a refactor of the Kafka chart and its architecture, to better adapt to Kraft features introduced in version 22.0.0. @@ -1209,10 +1213,6 @@ kubectl apply -f $NEW_PVC_MANIFEST_FILE Repeat this process for each replica you had in your Kafka cluster. Once completed, upgrade the cluster and the new Statefulset should reuse the existing PVCs. -### To 25.0.0 - -This major updates the Zookeeper subchart to it newest major, 12.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1200). - ### To 23.0.0 This major updates Kafka to its newest version, 3.5.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/35/documentation.html#upgrade). @@ -1424,4 +1424,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. +limitations under the License. \ No newline at end of file diff --git a/charts/bitnami/kafka/charts/common/Chart.yaml b/charts/bitnami/kafka/charts/common/Chart.yaml index 644d2a798..5669a24b3 100644 --- a/charts/bitnami/kafka/charts/common/Chart.yaml +++ b/charts/bitnami/kafka/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.9.0 +appVersion: 2.9.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.9.0 +version: 2.9.1 diff --git a/charts/bitnami/kafka/charts/common/templates/_affinities.tpl b/charts/bitnami/kafka/charts/common/templates/_affinities.tpl index b77534bb9..e85b1df45 100644 --- a/charts/bitnami/kafka/charts/common/templates/_affinities.tpl +++ b/charts/bitnami/kafka/charts/common/templates/_affinities.tpl @@ -60,12 +60,13 @@ Return a topologyKey definition {{/* Return a soft podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} */}} {{- define "common.affinities.pods.soft" -}} {{- $component := default "" .component -}} {{- $customLabels := default (dict) .customLabels -}} {{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: @@ -78,16 +79,30 @@ preferredDuringSchedulingIgnoredDuringExecution: {{- end }} topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} {{- end -}} {{/* Return a hard podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} */}} {{- define "common.affinities.pods.hard" -}} {{- $component := default "" .component -}} {{- $customLabels := default (dict) .customLabels -}} {{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} @@ -98,6 +113,17 @@ requiredDuringSchedulingIgnoredDuringExecution: {{ $key }}: {{ $value | quote }} {{- end }} topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range 
$key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} {{- end -}} {{/* diff --git a/charts/bitnami/kafka/charts/zookeeper/Chart.yaml b/charts/bitnami/kafka/charts/zookeeper/Chart.yaml index 3318f7bb8..20793df51 100644 --- a/charts/bitnami/kafka/charts/zookeeper/Chart.yaml +++ b/charts/bitnami/kafka/charts/zookeeper/Chart.yaml @@ -26,4 +26,4 @@ maintainers: name: zookeeper sources: - https://github.com/bitnami/charts/tree/main/bitnami/zookeeper -version: 12.1.0 +version: 12.1.1 diff --git a/charts/bitnami/kafka/charts/zookeeper/README.md b/charts/bitnami/kafka/charts/zookeeper/README.md index cc04e11f1..ba1fd588f 100644 --- a/charts/bitnami/kafka/charts/zookeeper/README.md +++ b/charts/bitnami/kafka/charts/zookeeper/README.md @@ -423,6 +423,10 @@ Find more information about how to deal with common errors related to Bitnami's ## Upgrading +### To 12.0.0 + +This new version of the chart includes the new ZooKeeper major version 3.9.x. For more information, please refer to [Zookeeper 3.9.0 Release Notes](https://zookeeper.apache.org/doc/r3.9.0/releasenotes.html) + ### To 11.0.0 This major version removes `commonAnnotations` and `commonLabels` from `volumeClaimTemplates`. Now annotations and labels can be set in volume claims using `persistence.annotations` and `persistence.labels` values. If the previous deployment has already set `commonAnnotations` and/or `commonLabels` values, to ensure a clean upgrade from previous version without loosing data, please set `persistence.annotations` and/or `persistence.labels` values with the same content as the common values. diff --git a/charts/bitnami/kafka/templates/broker/statefulset.yaml b/charts/bitnami/kafka/templates/broker/statefulset.yaml index bca8fd266..d899edb83 100644 --- a/charts/bitnami/kafka/templates/broker/statefulset.yaml +++ b/charts/bitnami/kafka/templates/broker/statefulset.yaml @@ -61,8 +61,8 @@ spec: affinity: {{- include "common.tplvalues.render" (dict "value" .Values.broker.affinity "context" $) | nindent 8 }} {{- else }} affinity: - podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAffinityPreset "component" "kafka" "customLabels" $podLabels "context" $) | nindent 10 }} - podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAntiAffinityPreset "component" "kafka" "customLabels" $podLabels "context" $) | nindent 10 }} + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAffinityPreset "component" "broker" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.broker.podAntiAffinityPreset "component" "broker" "customLabels" $podLabels "context" $) | nindent 10 }} nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.broker.nodeAffinityPreset.type "key" .Values.broker.nodeAffinityPreset.key "values" .Values.broker.nodeAffinityPreset.values) | nindent 10 }} {{- end }} {{- if .Values.broker.nodeSelector }} diff --git a/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml b/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml index 072a14b2b..80ddc3727 100644 --- a/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml +++ b/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml @@ -61,8 +61,8 @@ spec: affinity: {{- include "common.tplvalues.render" (dict 
"value" .Values.controller.affinity "context" $) | nindent 8 }} {{- else }} affinity: - podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAffinityPreset "component" "kafka" "customLabels" $podLabels "context" $) | nindent 10 }} - podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAntiAffinityPreset "component" "kafka" "customLabels" $podLabels "context" $) | nindent 10 }} + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAffinityPreset "component" "controller-eligible" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.controller.podAntiAffinityPreset "component" "controller-eligible" "customLabels" $podLabels "context" $) | nindent 10 }} nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.controller.nodeAffinityPreset.type "key" .Values.controller.nodeAffinityPreset.key "values" .Values.controller.nodeAffinityPreset.values) | nindent 10 }} {{- end }} {{- if .Values.controller.nodeSelector }} diff --git a/charts/bitnami/kafka/templates/metrics/deployment.yaml b/charts/bitnami/kafka/templates/metrics/deployment.yaml index bbcea7fba..16eb69f67 100644 --- a/charts/bitnami/kafka/templates/metrics/deployment.yaml +++ b/charts/bitnami/kafka/templates/metrics/deployment.yaml @@ -7,7 +7,7 @@ SPDX-License-Identifier: APACHE-2.0 {{- $releaseNamespace := include "common.names.namespace" . -}} {{- $clusterDomain := .Values.clusterDomain -}} {{- $fullname := include "common.names.fullname" . -}} -{{- $servicePort := int .Values.service.ports.client -}} +{{- $containerPort := int .Values.listeners.client.containerPort -}} apiVersion: {{ include "common.capabilities.deployment.apiVersion" . 
}} kind: Deployment metadata: @@ -40,8 +40,8 @@ spec: affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }} {{- else }} affinity: - podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "metrics" "customLabels" $podLabels "context" $) | nindent 10 }} - podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "metrics" "customLabels" $podLabels "context" $) | nindent 10 }} + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "cluster-metrics" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "cluster-metrics" "customLabels" $podLabels "context" $) | nindent 10 }} nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }} {{- end }} {{- if .Values.metrics.kafka.nodeSelector }} @@ -92,10 +92,10 @@ spec: - | kafka_exporter \ {{- range $i := until (int .Values.controller.replicaCount) }} - --kafka.server={{ $fullname }}-controller-{{ $i }}.{{ $fullname }}-controller-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + --kafka.server={{ $fullname }}-controller-{{ $i }}.{{ $fullname }}-controller-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $containerPort }} \ {{- end }} {{- range $i := until (int .Values.broker.replicaCount) }} - --kafka.server={{ $fullname }}-broker-{{ $i }}.{{ $fullname }}-broker-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + --kafka.server={{ $fullname }}-broker-{{ $i }}.{{ $fullname }}-broker-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $containerPort }} \ {{- end }} {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} --sasl.enabled \ diff --git a/charts/bitnami/kafka/values.yaml b/charts/bitnami/kafka/values.yaml index 2c2ffb965..c8e6cb728 100644 --- a/charts/bitnami/kafka/values.yaml +++ b/charts/bitnami/kafka/values.yaml @@ -80,7 +80,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/kafka - tag: 3.5.1-debian-11-r25 + tag: 3.5.1-debian-11-r35 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1254,7 +1254,7 @@ externalAccess: image: registry: docker.io repository: bitnami/kubectl - tag: 1.25.12-debian-11-r29 + tag: 1.25.13-debian-11-r5 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1504,7 +1504,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 11-debian-11-r43 + tag: 11-debian-11-r51 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
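For orientation, here is a minimal, hypothetical values override that exercises the external-access parameters documented in the Kafka README table above. Only keys listed in that table are used; the release name, file name, and cluster details are assumptions, and depending on the environment the auto-discovery init container will likely also need RBAC permissions to read Services/Nodes, which this sketch does not cover.

```yaml
# Hypothetical override file (e.g. kafka-external.yaml) -- a sketch, not the chart's defaults.
externalAccess:
  enabled: true                     # expose brokers outside the cluster
  autoDiscovery:
    enabled: true                   # kubectl init container resolves external IPs/ports via the K8s API
    image:
      tag: 1.25.13-debian-11-r5     # kubectl image bumped in this release
  controller:
    service:
      type: LoadBalancer            # NodePort or ClusterIP are also documented options
      ports:
        external: 9094
  broker:
    service:
      type: LoadBalancer
      ports:
        external: 9094
```

Something like `helm upgrade --install my-kafka -f kafka-external.yaml oci://registry-1.docker.io/bitnamicharts/kafka` would apply it (release name and chart reference are assumptions).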
@@ -1586,7 +1586,7 @@ metrics: image: registry: docker.io repository: bitnami/kafka-exporter - tag: 1.7.0-debian-11-r85 + tag: 1.7.0-debian-11-r93 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1840,7 +1840,7 @@ metrics: image: registry: docker.io repository: bitnami/jmx-exporter - tag: 0.19.0-debian-11-r49 + tag: 0.19.0-debian-11-r57 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' diff --git a/charts/bitnami/wordpress/Chart.lock b/charts/bitnami/wordpress/Chart.lock index 103d2831c..2e18fffa0 100644 --- a/charts/bitnami/wordpress/Chart.lock +++ b/charts/bitnami/wordpress/Chart.lock @@ -4,9 +4,9 @@ dependencies: version: 6.6.0 - name: mariadb repository: oci://registry-1.docker.io/bitnamicharts - version: 13.1.0 + version: 13.1.2 - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.9.0 -digest: sha256:19433d22b87927464569967e128b716709f4b8e8c99e59c5b6d00b6c61ed98f4 -generated: "2023-08-23T12:48:27.768104+02:00" + version: 2.9.1 +digest: sha256:5df6e862af69422cc6e287bf9dd560b3a1e56d3b49b4bc81132b0db10903cd80 +generated: "2023-08-30T09:41:25.351778314Z" diff --git a/charts/bitnami/wordpress/Chart.yaml b/charts/bitnami/wordpress/Chart.yaml index 646700ca1..f3668fe61 100644 --- a/charts/bitnami/wordpress/Chart.yaml +++ b/charts/bitnami/wordpress/Chart.yaml @@ -6,14 +6,14 @@ annotations: category: CMS images: | - name: apache-exporter - image: docker.io/bitnami/apache-exporter:1.0.1-debian-11-r23 + image: docker.io/bitnami/apache-exporter:1.0.1-debian-11-r29 - name: os-shell - image: docker.io/bitnami/os-shell:11-debian-11-r45 + image: docker.io/bitnami/os-shell:11-debian-11-r51 - name: wordpress - image: docker.io/bitnami/wordpress:6.3.0-debian-11-r13 + image: docker.io/bitnami/wordpress:6.3.1-debian-11-r0 licenses: Apache-2.0 apiVersion: v2 -appVersion: 6.3.0 +appVersion: 6.3.1 dependencies: - condition: memcached.enabled name: memcached @@ -47,4 +47,4 @@ maintainers: name: wordpress sources: - https://github.com/bitnami/charts/tree/main/bitnami/wordpress -version: 17.1.3 +version: 17.1.4 diff --git a/charts/bitnami/wordpress/README.md b/charts/bitnami/wordpress/README.md index 86919cab6..89eaad526 100644 --- a/charts/bitnami/wordpress/README.md +++ b/charts/bitnami/wordpress/README.md @@ -78,15 +78,15 @@ The command removes all the Kubernetes components associated with the chart and ### WordPress Image parameters -| Name | Description | Value | -| ------------------- | --------------------------------------------------------------------------------------------------------- | --------------------- | -| `image.registry` | WordPress image registry | `docker.io` | -| `image.repository` | WordPress image repository | `bitnami/wordpress` | -| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.3.0-debian-11-r13` | -| `image.digest` | WordPress image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` | -| `image.pullSecrets` | WordPress image pull secrets | `[]` | -| `image.debug` | Specify if debug values should be set | `false` | +| Name | Description | Value | +| ------------------- | --------------------------------------------------------------------------------------------------------- | -------------------- | +| `image.registry` | WordPress image registry | `docker.io` | +| `image.repository` | WordPress image repository | `bitnami/wordpress` | +| `image.tag` | WordPress image tag (immutable tags are recommended) | `6.3.1-debian-11-r0` | +| `image.digest` | WordPress image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | WordPress image pull policy | `IfNotPresent` | +| `image.pullSecrets` | WordPress image pull secrets | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | ### WordPress Configuration parameters @@ -249,7 +249,7 @@ The command removes all the Kubernetes components associated with the chart and | `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | | `volumePermissions.image.registry` | OS Shell + Utility image registry | `docker.io` | | `volumePermissions.image.repository` | OS Shell + Utility image repository | `bitnami/os-shell` | -| `volumePermissions.image.tag` | OS Shell + Utility image tag (immutable tags are recommended) | `11-debian-11-r45` | +| `volumePermissions.image.tag` | OS Shell + Utility image tag (immutable tags are recommended) | `11-debian-11-r51` | | `volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | | `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | @@ -281,7 +281,7 @@ The command removes all the Kubernetes components associated with the chart and | `metrics.enabled` | Start a sidecar prometheus exporter to expose metrics | `false` | | `metrics.image.registry` | Apache exporter image registry | `docker.io` | | `metrics.image.repository` | Apache exporter image repository | `bitnami/apache-exporter` | -| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `1.0.1-debian-11-r23` | +| `metrics.image.tag` | Apache exporter image tag (immutable tags are recommended) | `1.0.1-debian-11-r29` | | `metrics.image.digest` | Apache exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `metrics.image.pullPolicy` | Apache exporter image pull policy | `IfNotPresent` | | `metrics.image.pullSecrets` | Apache exporter image pull secrets | `[]` | diff --git a/charts/bitnami/wordpress/charts/common/Chart.yaml b/charts/bitnami/wordpress/charts/common/Chart.yaml index 644d2a798..5669a24b3 100644 --- a/charts/bitnami/wordpress/charts/common/Chart.yaml +++ b/charts/bitnami/wordpress/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.9.0 +appVersion: 2.9.1 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.9.0 +version: 2.9.1 diff --git a/charts/bitnami/wordpress/charts/common/templates/_affinities.tpl b/charts/bitnami/wordpress/charts/common/templates/_affinities.tpl index b77534bb9..e85b1df45 100644 --- a/charts/bitnami/wordpress/charts/common/templates/_affinities.tpl +++ b/charts/bitnami/wordpress/charts/common/templates/_affinities.tpl @@ -60,12 +60,13 @@ Return a topologyKey definition {{/* Return a soft podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} */}} {{- define "common.affinities.pods.soft" -}} {{- $component := default "" .component -}} {{- $customLabels := default (dict) .customLabels -}} {{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: @@ -78,16 +79,30 @@ preferredDuringSchedulingIgnoredDuringExecution: {{- end }} topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} {{- end -}} {{/* Return a hard podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} */}} {{- define "common.affinities.pods.hard" -}} {{- $component := default "" .component -}} {{- $customLabels := default (dict) .customLabels -}} {{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} @@ -98,6 +113,17 @@ requiredDuringSchedulingIgnoredDuringExecution: {{ $key }}: {{ $value | quote }} {{- end }} topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- 
end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} {{- end -}} {{/* diff --git a/charts/bitnami/wordpress/charts/mariadb/Chart.yaml b/charts/bitnami/wordpress/charts/mariadb/Chart.yaml index 455d8d61a..e3cef98b1 100644 --- a/charts/bitnami/wordpress/charts/mariadb/Chart.yaml +++ b/charts/bitnami/wordpress/charts/mariadb/Chart.yaml @@ -33,4 +33,4 @@ maintainers: name: mariadb sources: - https://github.com/bitnami/charts/tree/main/bitnami/mariadb -version: 13.1.0 +version: 13.1.2 diff --git a/charts/bitnami/wordpress/charts/mariadb/templates/primary/svc.yaml b/charts/bitnami/wordpress/charts/mariadb/templates/primary/svc.yaml index f0066aa70..adf3685e0 100644 --- a/charts/bitnami/wordpress/charts/mariadb/templates/primary/svc.yaml +++ b/charts/bitnami/wordpress/charts/mariadb/templates/primary/svc.yaml @@ -27,7 +27,7 @@ spec: externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} {{- end }} {{- if and (eq .Values.primary.service.type "LoadBalancer") .Values.primary.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{ .Values.primary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.primary.service.loadBalancerSourceRanges | nindent 4 }} {{ end }} {{- if (and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP))) }} loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} diff --git a/charts/bitnami/wordpress/charts/mariadb/templates/secondary/svc.yaml b/charts/bitnami/wordpress/charts/mariadb/templates/secondary/svc.yaml index 30388144c..36fd81042 100644 --- a/charts/bitnami/wordpress/charts/mariadb/templates/secondary/svc.yaml +++ b/charts/bitnami/wordpress/charts/mariadb/templates/secondary/svc.yaml @@ -28,7 +28,7 @@ spec: externalTrafficPolicy: {{ .Values.secondary.service.externalTrafficPolicy | quote }} {{- end }} {{- if and (eq .Values.secondary.service.type "LoadBalancer") .Values.secondary.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{ .Values.secondary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.secondary.service.loadBalancerSourceRanges | nindent 4 }} {{ end }} {{- if and (eq .Values.secondary.service.type "LoadBalancer") (not (empty .Values.secondary.service.loadBalancerIP)) }} loadBalancerIP: {{ .Values.secondary.service.loadBalancerIP }} diff --git a/charts/bitnami/wordpress/values.yaml b/charts/bitnami/wordpress/values.yaml index 5ac7f6ad9..35374622d 100644 --- a/charts/bitnami/wordpress/values.yaml +++ b/charts/bitnami/wordpress/values.yaml @@ -76,7 +76,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/wordpress - tag: 6.3.0-debian-11-r13 + tag: 6.3.1-debian-11-r0 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -766,7 +766,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 11-debian-11-r45 + tag: 11-debian-11-r51 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -860,7 +860,7 @@ metrics: image: registry: docker.io repository: bitnami/apache-exporter - tag: 1.0.1-debian-11-r23 + tag: 1.0.1-debian-11-r29 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
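As a quick illustration of the mariadb `loadBalancerSourceRanges` template change above: with values such as the following (a hypothetical sketch; the CIDRs are placeholders and the `mariadb:` nesting assumes the subchart is configured through the WordPress parent chart), the field is now rendered via `toYaml | nindent 4` as a proper YAML list with one item per range, instead of the Go-formatted string the old template produced, which did not yield one CIDR entry per range.

```yaml
# Hypothetical parent-chart values -- a sketch of the fixed rendering path only.
mariadb:
  primary:
    service:
      type: LoadBalancer            # the template only emits the field for LoadBalancer services
      loadBalancerSourceRanges:     # rendered as "- 10.0.0.0/8" / "- 192.168.0.0/16" in the Service spec
        - 10.0.0.0/8
        - 192.168.0.0/16
```

The same fix applies to the secondary service template shown above.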
diff --git a/charts/fairwinds/polaris/Chart.yaml b/charts/fairwinds/polaris/Chart.yaml index f3551a4fd..71a403186 100644 --- a/charts/fairwinds/polaris/Chart.yaml +++ b/charts/fairwinds/polaris/Chart.yaml @@ -12,4 +12,4 @@ maintainers: - email: robertb@fairwinds.com name: rbren name: polaris -version: 5.13.0 +version: 5.14.0 diff --git a/charts/fairwinds/polaris/README.md b/charts/fairwinds/polaris/README.md index 5e9ed0452..b4e0a2948 100644 --- a/charts/fairwinds/polaris/README.md +++ b/charts/fairwinds/polaris/README.md @@ -79,6 +79,7 @@ the 0.10.0 version of this chart will only work on kubernetes 1.14.0+ | webhook.validate | bool | `true` | Enables the Validating Webhook, to reject resources with issues | | webhook.mutate | bool | `false` | Enables the Mutating Webhook, to modify resources with issues | | webhook.replicas | int | `2` | Number of replicas | +| webhook.logLevel | string | `"info"` | Set the logging level for the Webhook command | | webhook.nodeSelector | object | `{}` | Webhook pod nodeSelector | | webhook.tolerations | list | `[]` | Webhook pod tolerations | | webhook.affinity | object | `{}` | Webhook pods affinity | diff --git a/charts/fairwinds/polaris/templates/webhook.deployment.yaml b/charts/fairwinds/polaris/templates/webhook.deployment.yaml index 1e085c5fc..dc8c6fe5e 100644 --- a/charts/fairwinds/polaris/templates/webhook.deployment.yaml +++ b/charts/fairwinds/polaris/templates/webhook.deployment.yaml @@ -52,6 +52,9 @@ spec: {{- end }} - --validate={{ .Values.webhook.validate }} - --mutate={{ .Values.webhook.mutate }} + {{- if .Values.webhook.logLevel }} + - --log-level={{ .Values.webhook.logLevel }} + {{- end }} image: '{{.Values.image.repository}}:{{.Values.image.tag | default .Chart.AppVersion }}' imagePullPolicy: '{{.Values.image.pullPolicy}}' ports: diff --git a/charts/fairwinds/polaris/values.yaml b/charts/fairwinds/polaris/values.yaml index 024462a6f..cd8461828 100644 --- a/charts/fairwinds/polaris/values.yaml +++ b/charts/fairwinds/polaris/values.yaml @@ -139,6 +139,8 @@ webhook: mutate: false # webhook.replicas -- Number of replicas replicas: 2 + # webhook.logLevel -- Set the logging level for the Webhook command + logLevel: info # webhook.nodeSelector -- Webhook pod nodeSelector nodeSelector: {} # webhook.tolerations -- Webhook pod tolerations diff --git a/charts/harbor/harbor/Chart.yaml b/charts/harbor/harbor/Chart.yaml index 7a69cca9f..6b093bb1a 100644 --- a/charts/harbor/harbor/Chart.yaml +++ b/charts/harbor/harbor/Chart.yaml @@ -4,11 +4,11 @@ annotations: catalog.cattle.io/kube-version: '>=1.20-0' catalog.cattle.io/release-name: harbor apiVersion: v1 -appVersion: 2.8.4 +appVersion: 2.9.0 description: An open source trusted cloud native registry that stores, signs, and scans content home: https://goharbor.io -icon: https://raw.githubusercontent.com/goharbor/website/master/static/img/logos/harbor-icon-color.png +icon: https://raw.githubusercontent.com/goharbor/website/main/static/img/logos/harbor-icon-color.png keywords: - docker - registry @@ -24,4 +24,4 @@ name: harbor sources: - https://github.com/goharbor/harbor - https://github.com/goharbor/harbor-helm -version: 1.12.4 +version: 1.13.0 diff --git a/charts/harbor/harbor/README.md b/charts/harbor/harbor/README.md index 85cc537c6..f30598cc0 100644 --- a/charts/harbor/harbor/README.md +++ b/charts/harbor/harbor/README.md @@ -1,6 +1,6 @@ # Helm Chart for Harbor -**Notes:** The master branch is in heavy development, please use the other stable versions instead. 
A highly available solution for Harbor based on chart can be find [here](docs/High%20Availability.md). And refer to the [guide](docs/Upgrade.md) to upgrade the existing deployment. +**Notes:** The master branch is in heavy development, please use the other stable versions instead. A highly available solution for Harbor based on chart can be found [here](docs/High%20Availability.md). And refer to the [guide](docs/Upgrade.md) to upgrade the existing deployment. This repository, including the issues, focuses on deploying Harbor chart via helm. For functionality issues or Harbor questions, please open issues on [goharbor/harbor](https://github.com/goharbor/harbor) @@ -38,7 +38,7 @@ The following items can be set via `--set` flag during installation or configure The external URL for Harbor core service is used to: 1. populate the docker/helm commands showed on portal -2. populate the token service URL returned to docker/notary client +2. populate the token service URL returned to docker client Format: `protocol://domain[:port]`. Usually: @@ -83,37 +83,30 @@ The following table lists the configurable parameters of the Harbor chart and th | `expose.tls.certSource` | The source of the TLS certificate. Set as `auto`, `secret` or `none` and fill the information in the corresponding section: 1) auto: generate the TLS certificate automatically 2) secret: read the TLS certificate from the specified secret. The TLS certificate can be generated manually or by cert manager 3) none: configure no TLS certificate for the ingress. If the default TLS certificate is configured in the ingress controller, choose this option | `auto` | | `expose.tls.auto.commonName` | The common name used to generate the certificate, it's necessary when the type isn't `ingress` | | | `expose.tls.secret.secretName` | The name of secret which contains keys named: `tls.crt` - the certificate; `tls.key` - the private key | | -| `expose.tls.secret.notarySecretName` | The name of secret which contains keys named: `tls.crt` - the certificate; `tls.key` - the private key. Only needed when the `expose.type` is `ingress` | | | `expose.ingress.hosts.core` | The host of Harbor core service in ingress rule | `core.harbor.domain` | -| `expose.ingress.hosts.notary` | The host of Harbor Notary service in ingress rule | `notary.harbor.domain` | | `expose.ingress.controller` | The ingress controller type. Currently supports `default`, `gce`, `alb`, `f5-bigip` and `ncp` | `default` | | `expose.ingress.kubeVersionOverride` | Allows the ability to override the kubernetes version used while templating the ingress | | | `expose.ingress.annotations` | The annotations used commonly for ingresses | | | `expose.ingress.harbor.annotations` | The annotations specific to harbor ingress | {} | | `expose.ingress.harbor.labels` | The labels specific to harbor ingress | {} | -| `expose.ingress.notary.annotations` | The annotations specific to notary ingress | {} | -| `expose.ingress.notary.labels` | The labels specific to notary ingress | {} | | `expose.clusterIP.name` | The name of ClusterIP service | `harbor` | | `expose.clusterIP.annotations` | The annotations attached to the ClusterIP service | {} | | `expose.clusterIP.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | | `expose.clusterIP.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `443` | -| `expose.clusterIP.ports.notaryPort` | The service port Notary listens on. 
Only needed when `notary.enabled` is set to `true` | `4443` | | `expose.nodePort.name` | The name of NodePort service | `harbor` | | `expose.nodePort.ports.http.port` | The service port Harbor listens on when serving HTTP | `80` | | `expose.nodePort.ports.http.nodePort` | The node port Harbor listens on when serving HTTP | `30002` | | `expose.nodePort.ports.https.port` | The service port Harbor listens on when serving HTTPS | `443` | | `expose.nodePort.ports.https.nodePort` | The node port Harbor listens on when serving HTTPS | `30003` | -| `expose.nodePort.ports.notary.port` | The service port Notary listens on. Only needed when `notary.enabled` is set to `true` | `4443` | -| `expose.nodePort.ports.notary.nodePort` | The node port Notary listens on. Only needed when `notary.enabled` is set to `true` | `30004` | | `expose.loadBalancer.name` | The name of service | `harbor` | | `expose.loadBalancer.IP` | The IP of the loadBalancer. It only works when loadBalancer supports assigning IP | `""` | | `expose.loadBalancer.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | | `expose.loadBalancer.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `30002` | -| `expose.loadBalancer.ports.notaryPort` | The service port Notary listens on. Only needed when `notary.enabled` is set to `true` | | | `expose.loadBalancer.annotations` | The annotations attached to the loadBalancer service | {} | | `expose.loadBalancer.sourceRanges` | List of IP address ranges to assign to loadBalancerSourceRanges | [] | | **Internal TLS** | | | | `internalTLS.enabled` | Enable TLS for the components (core, jobservice, portal, registry, trivy) | `false` | +| `internalTLS.strong_ssl_ciphers` | Enable strong ssl ciphers for nginx and portal | `false` | `internalTLS.certSource` | Method to provide TLS for the components, options are `auto`, `manual`, `secret`. | `auto` | | `internalTLS.trustCa` | The content of trust CA, only available when `certSource` is `manual`. **Note**: all the internal certificates of the components must be issued by this CA | | | `internalTLS.core.secretName` | The secret name for core component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. 
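When `internalTLS.certSource` is `secret`, each component expects a pre-created secret such as the one named by `internalTLS.core.secretName` above. A sketch of the expected shape, with a hypothetical secret name and placeholder PEM data (all components must share the same CA):

```yaml
# Hypothetical Secret referenced by internalTLS.core.secretName (certSource: "secret").
apiVersion: v1
kind: Secret
metadata:
  name: harbor-core-internal-tls
type: Opaque
stringData:
  ca.crt: |
    -----BEGIN CERTIFICATE-----
    ...shared CA certificate (placeholder)...
    -----END CERTIFICATE-----
  tls.crt: |
    -----BEGIN CERTIFICATE-----
    ...core certificate issued by that CA (placeholder)...
    -----END CERTIFICATE-----
  tls.key: |
    -----BEGIN PRIVATE KEY-----
    ...core private key (placeholder)...
    -----END PRIVATE KEY-----
```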
| | @@ -201,6 +194,7 @@ The following table lists the configurable parameters of the Harbor chart and th | `nginx.nodeSelector` | Node labels for pod assignment | `{}` | | `nginx.tolerations` | Tolerations for pod assignment | `[]` | | `nginx.affinity` | Node/Pod affinities | `{}` | +| `nginx.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | | `nginx.podAnnotations` | Annotations to add to the nginx pod | `{}` | | `nginx.priorityClassName` | The priority class to run the pod as | | | **Portal** | | | @@ -213,6 +207,7 @@ The following table lists the configurable parameters of the Harbor chart and th | `portal.nodeSelector` | Node labels for pod assignment | `{}` | | `portal.tolerations` | Tolerations for pod assignment | `[]` | | `portal.affinity` | Node/Pod affinities | `{}` | +| `portal.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | | `portal.podAnnotations` | Annotations to add to the portal pod | `{}` | | `portal.priorityClassName` | The priority class to run the pod as | | | **Core** | | | @@ -226,8 +221,11 @@ The following table lists the configurable parameters of the Harbor chart and th | `core.nodeSelector` | Node labels for pod assignment | `{}` | | `core.tolerations` | Tolerations for pod assignment | `[]` | | `core.affinity` | Node/Pod affinities | `{}` | +| `core.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | | `core.podAnnotations` | Annotations to add to the core pod | `{}` | | `core.serviceAnnotations` | Annotations to add to the core service | `{}` | +| `core.configureUserSettings` | A JSON string to set in the environment variable `CONFIG_OVERWRITE_JSON` to configure user settings. See the [official docs](https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#configure-users-settings-using-an-environment-variable). | | +| `core.quotaUpdateProvider` | The provider used to update project quota (usage); the options are `redis` or `db`. It defaults to `db`, but switching to `redis` can improve performance under highly concurrent pushes to the same project and reduce database connection spikes and usage. Using `redis` adds a small delay before quota usage is reflected in the UI, so only switch the provider to `redis` if you run into database connection spikes caused by highly concurrent pushes to the same project; other scenarios see no improvement. | `db` | | `core.secret` | Secret is used when core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | | `core.secretName` | Fill in the name of a kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set | | | `core.tokenKey` | PEM-formatted RSA private key used to sign service tokens. Only used if `core.secretName` is unset. If set, `core.tokenCert` MUST also be set.
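The two new `core.*` options above can be combined in a values override. A minimal sketch, assuming the single user setting shown is the one you want to pin; the JSON string is base64-encoded into `CONFIG_OVERWRITE_JSON` by `core-secret.yaml`:

```yaml
# Hypothetical values override exercising the new core options in harbor 1.13.0.
core:
  # Plain JSON string injected as CONFIG_OVERWRITE_JSON (the key shown is illustrative).
  configureUserSettings: '{"auth_mode": "db_auth"}'
  # Switch quota updates to redis only if db connections spike under concurrent pushes.
  quotaUpdateProvider: redis
```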
| | @@ -253,6 +251,7 @@ The following table lists the configurable parameters of the Harbor chart and th | `jobservice.nodeSelector` | Node labels for pod assignment | `{}` | | `jobservice.tolerations` | Tolerations for pod assignment | `[]` | | `jobservice.affinity` | Node/Pod affinities | `{}` | +| `jobservice.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | | `jobservice.podAnnotations` | Annotations to add to the jobservice pod | `{}` | | `jobservice.priorityClassName` | The priority class to run the pod as | | | `jobservice.secret` | Secret is used when job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | @@ -269,12 +268,13 @@ The following table lists the configurable parameters of the Harbor chart and th | `registry.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | | `registry.tolerations` | Tolerations for pod assignment | `[]` | | `registry.affinity` | Node/Pod affinities | `{}` | +| `registry.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | | `registry.middleware` | Middleware is used to add support for a CDN between backend storage and `docker pull` recipient. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#middleware). | | | `registry.podAnnotations` | Annotations to add to the registry pod | `{}` | | `registry.priorityClassName` | The priority class to run the pod as | | | `registry.secret` | Secret is used to secure the upload state from client and registry storage backend. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#http). If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | -| `registry.credentials.username` | The username for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). | `harbor_registry_user` | -| `registry.credentials.password` | The password for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation. | `harbor_registry_password` | +| `registry.credentials.username` | The username that harbor core uses internally to access the registry instance. Together with the `registry.credentials.password`, a htpasswd  is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). | `harbor_registry_user` | +| `registry.credentials.password` | The password that harbor core uses internally to access the registry instance. Together with the `registry.credentials.username`, a htpasswd  is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation. 
| `harbor_registry_password` | | `registry.credentials.existingSecret` | An existing secret containing the password for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). The key must be `REGISTRY_PASSWD` | `""` | | `registry.credentials.htpasswdString` | Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt. | undefined | | `registry.relativeurls` | If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. Needed if harbor is behind a reverse proxy | `false` | @@ -300,26 +300,7 @@ The following table lists the configurable parameters of the Harbor chart and th | `trivy.timeout` | The duration to wait for scan completion | `5m0s` | | `trivy.gitHubToken` | The GitHub access token to download [Trivy DB][trivy-db] (see [GitHub rate limiting][trivy-rate-limiting]) | | | `trivy.priorityClassName` | The priority class to run the pod as | | -| **Notary** | | | -| `notary.enabled` | Enable Notary? | `true` | -| `notary.server.image.repository` | Repository for notary server image | `goharbor/notary-server-photon` | -| `notary.server.image.tag` | Tag for notary server image | `dev` | -| `notary.server.replicas` | The replica count | `1` | -| `notary.server.resources` | The [resources] to allocate for container | undefined | -| `notary.server.priorityClassName` | The priority class to run the pod as | | -| `notary.server.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `notary.signer.image.repository` | Repository for notary signer image | `goharbor/notary-signer-photon` | -| `notary.signer.image.tag` | Tag for notary signer image | `dev` | -| `notary.signer.replicas` | The replica count | `1` | -| `notary.signer.resources` | The [resources] to allocate for container | undefined | -| `notary.signer.priorityClassName` | The priority class to run the pod as | | -| `notary.signer.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `notary.nodeSelector` | Node labels for pod assignment | `{}` | -| `notary.tolerations` | Tolerations for pod assignment | `[]` | -| `notary.affinity` | Node/Pod affinities | `{}` | -| `notary.podAnnotations` | Annotations to add to the notary pod | `{}` | -| `notary.serviceAnnotations` | Annotations to add to the notary service | `{}` | -| `notary.secretName` | Fill the name of a kubernetes secret if you want to use your own TLS certificate authority, certificate and private key for notary communications. The secret must contain keys named `ca.crt`, `tls.crt` and `tls.key` that contain the CA, certificate and private key. They will be generated if not set. 
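As an alternative to the username/password pair above, `registry.credentials.htpasswdString` keeps the rendered secret identical across `helm template` runs, which matters for Argo CD or Flux diffs. A sketch, assuming the line is produced once with the stock Apache tool; the hash below is a placeholder, not a real credential:

```yaml
# Hypothetical values override: pre-computed htpasswd line for the internal registry user.
# Generate it once with, for example:  htpasswd -nbB harbor_registry_user 'changeit'
registry:
  credentials:
    htpasswdString: "harbor_registry_user:$2y$05$...placeholder-bcrypt-hash..."
```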
| | +| `trivy.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | | **Database** | | | | `database.type` | If external database is used, set it to `external` | `internal` | | `database.internal.image.repository` | Repository for database image | `goharbor/harbor-db` | @@ -341,8 +322,6 @@ The following table lists the configurable parameters of the Harbor chart and th | `database.external.username` | The username of external database | `user` | | `database.external.password` | The password of external database | `password` | | `database.external.coreDatabase` | The database used by core service | `registry` | -| `database.external.notaryServerDatabase` | The database used by Notary server | `notary_server` | -| `database.external.notarySignerDatabase` | The database used by Notary signer | `notary_signer` | | `database.external.existingSecret` | An existing password containing the database password. the key must be `password`. | `""` | | `database.external.sslmode` | Connection method of external database (require, verify-full, verify-ca, disable) | `disable` | | `database.maxIdleConns` | The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained. | `50` | @@ -384,6 +363,7 @@ The following table lists the configurable parameters of the Harbor chart and th | `exporter.nodeSelector` | Node labels for pod assignment | `{}` | | `exporter.tolerations` | Tolerations for pod assignment | `[]` | | `exporter.affinity` | Node/Pod affinities | `{}` | +| `exporter.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | | `exporter.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | | `exporter.cacheDuration` | the cache duration for information that exporter collected from Harbor | `30` | | `exporter.cacheCleanInterval` | cache clean interval for information that exporter collected from Harbor | `14400` | diff --git a/charts/harbor/harbor/conf/notary-server.json b/charts/harbor/harbor/conf/notary-server.json deleted file mode 100644 index b3c262413..000000000 --- a/charts/harbor/harbor/conf/notary-server.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "server": { - "http_addr": ":4443" - }, - "trust_service": { - "type": "remote", - "hostname": "{{ template "harbor.notary-signer" . }}", - "port": "7899", - "tls_ca_file": "/etc/ssl/notary/ca.crt", - "key_algorithm": "ecdsa" - }, - "logging": { - "level": "{{ .Values.logLevel }}" - }, - "storage": { - "backend": "postgres", - "db_url": "{{ template "harbor.database.notaryServer" . }}" - }, - "auth": { - "type": "token", - "options": { - "realm": "{{ .Values.externalURL }}/service/token", - "service": "harbor-notary", - "issuer": "harbor-token-issuer", - "rootcertbundle": "/root.crt" - } - } -} \ No newline at end of file diff --git a/charts/harbor/harbor/conf/notary-signer.json b/charts/harbor/harbor/conf/notary-signer.json deleted file mode 100644 index 75a4d68bd..000000000 --- a/charts/harbor/harbor/conf/notary-signer.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "server": { - "grpc_addr": ":7899", - "tls_cert_file": "/etc/ssl/notary/tls.crt", - "tls_key_file": "/etc/ssl/notary/tls.key" - }, - "logging": { - "level": "{{ .Values.logLevel }}" - }, - "storage": { - "backend": "postgres", - "db_url": "{{ template "harbor.database.notarySigner" .
}}", - "default_alias": "defaultalias" - } -} \ No newline at end of file diff --git a/charts/harbor/harbor/templates/_helpers.tpl b/charts/harbor/harbor/templates/_helpers.tpl index 130ad5c04..95a28a6c5 100644 --- a/charts/harbor/harbor/templates/_helpers.tpl +++ b/charts/harbor/harbor/templates/_helpers.tpl @@ -111,22 +111,6 @@ app: "{{ template "harbor.name" . }}" {{- end -}} {{- end -}} -{{- define "harbor.database.notaryServerDatabase" -}} - {{- if eq .Values.database.type "internal" -}} - {{- printf "%s" "notaryserver" -}} - {{- else -}} - {{- .Values.database.external.notaryServerDatabase -}} - {{- end -}} -{{- end -}} - -{{- define "harbor.database.notarySignerDatabase" -}} - {{- if eq .Values.database.type "internal" -}} - {{- printf "%s" "notarysigner" -}} - {{- else -}} - {{- .Values.database.external.notarySignerDatabase -}} - {{- end -}} -{{- end -}} - {{- define "harbor.database.sslmode" -}} {{- if eq .Values.database.type "internal" -}} {{- printf "%s" "disable" -}} @@ -135,14 +119,6 @@ app: "{{ template "harbor.name" . }}" {{- end -}} {{- end -}} -{{- define "harbor.database.notaryServer" -}} -postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.database.escapedRawPassword" . }}@{{ template "harbor.database.host" . }}:{{ template "harbor.database.port" . }}/{{ template "harbor.database.notaryServerDatabase" . }}?sslmode={{ template "harbor.database.sslmode" . }} -{{- end -}} - -{{- define "harbor.database.notarySigner" -}} -postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.database.escapedRawPassword" . }}@{{ template "harbor.database.host" . }}:{{ template "harbor.database.port" . }}/{{ template "harbor.database.notarySignerDatabase" . }}?sslmode={{ template "harbor.database.sslmode" . }} -{{- end -}} - {{- define "harbor.redis.scheme" -}} {{- with .Values.redis }} {{- ternary "redis+sentinel" "redis" (and (eq .type "external" ) (not (not .external.sentinelMasterSet))) }} @@ -263,14 +239,6 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab {{- printf "%s-trivy" (include "harbor.fullname" .) -}} {{- end -}} -{{- define "harbor.notary-server" -}} - {{- printf "%s-notary-server" (include "harbor.fullname" .) -}} -{{- end -}} - -{{- define "harbor.notary-signer" -}} - {{- printf "%s-notary-signer" (include "harbor.fullname" .) -}} -{{- end -}} - {{- define "harbor.nginx" -}} {{- printf "%s-nginx" (include "harbor.fullname" .) -}} {{- end -}} @@ -283,12 +251,8 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab {{- printf "%s-ingress" (include "harbor.fullname" .) -}} {{- end -}} -{{- define "harbor.ingress-notary" -}} - {{- printf "%s-ingress-notary" (include "harbor.fullname" .) -}} -{{- end -}} - {{- define "harbor.noProxy" -}} - {{- printf "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" (include "harbor.core" .) (include "harbor.jobservice" .) (include "harbor.database" .) (include "harbor.notary-server" .) (include "harbor.notary-signer" .) (include "harbor.registry" .) (include "harbor.portal" .) (include "harbor.trivy" .) (include "harbor.exporter" .) .Values.proxy.noProxy -}} + {{- printf "%s,%s,%s,%s,%s,%s,%s,%s" (include "harbor.core" .) (include "harbor.jobservice" .) (include "harbor.database" .) (include "harbor.registry" .) (include "harbor.portal" .) (include "harbor.trivy" .) (include "harbor.exporter" .) .Values.proxy.noProxy -}} {{- end -}} {{- define "harbor.caBundleVolume" -}} @@ -303,7 +267,7 @@ postgres://{{ template "harbor.database.username" . 
}}:{{ template "harbor.datab subPath: ca.crt {{- end -}} -{{/* scheme for all components except notary because it only support http mode */}} +{{/* scheme for all components because it only support http mode */}} {{- define "harbor.component.scheme" -}} {{- if .Values.internalTLS.enabled -}} {{- printf "https" -}} @@ -506,16 +470,6 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab {{- end -}} {{- end -}} -{{- define "harbor.tlsNotarySecretForIngress" -}} - {{- if eq .Values.expose.tls.certSource "none" -}} - {{- printf "" -}} - {{- else if eq .Values.expose.tls.certSource "secret" -}} - {{- .Values.expose.tls.secret.notarySecretName -}} - {{- else -}} - {{- include "harbor.ingress" . -}} - {{- end -}} -{{- end -}} - {{- define "harbor.tlsSecretForNginx" -}} {{- if eq .Values.expose.tls.certSource "secret" -}} {{- .Values.expose.tls.secret.secretName -}} @@ -537,7 +491,7 @@ postgres://{{ template "harbor.database.username" . }}:{{ template "harbor.datab TRACE_SAMPLE_RATE: "{{ .Values.trace.sample_rate }}" TRACE_NAMESPACE: "{{ .Values.trace.namespace }}" {{- if .Values.trace.attributes }} - TRACE_ATTRIBUTES: "{{ .Values.trace.attributes | toJson }}" + TRACE_ATTRIBUTES: {{ .Values.trace.attributes | toJson | squote }} {{- end }} {{- if eq .Values.trace.provider "jaeger" }} TRACE_JAEGER_ENDPOINT: "{{ .Values.trace.jaeger.endpoint }}" diff --git a/charts/harbor/harbor/templates/core/core-cm.yaml b/charts/harbor/harbor/templates/core/core-cm.yaml index 96562cc06..7d284c899 100644 --- a/charts/harbor/harbor/templates/core/core-cm.yaml +++ b/charts/harbor/harbor/templates/core/core-cm.yaml @@ -26,8 +26,6 @@ data: JOBSERVICE_URL: "{{ template "harbor.jobserviceURL" . }}" REGISTRY_URL: "{{ template "harbor.registryURL" . }}" TOKEN_SERVICE_URL: "{{ template "harbor.tokenServiceURL" . }}" - WITH_NOTARY: "{{ .Values.notary.enabled }}" - NOTARY_URL: "http://{{ template "harbor.notary-server" . }}:4443" CORE_LOCAL_URL: "{{ ternary "https://127.0.0.1:8443" "http://127.0.0.1:8080" .Values.internalTLS.enabled }}" WITH_TRIVY: {{ .Values.trivy.enabled | quote }} TRIVY_ADAPTER_URL: "{{ template "harbor.trivyAdapterURL" . }}" @@ -83,3 +81,7 @@ data: CACHE_ENABLED: "true" CACHE_EXPIRE_HOURS: "{{ .Values.cache.expireHours }}" {{- end }} + + {{- if .Values.core.quotaUpdateProvider }} + QUOTA_UPDATE_PROVIDER: "{{ .Values.core.quotaUpdateProvider }}" + {{- end }} \ No newline at end of file diff --git a/charts/harbor/harbor/templates/core/core-dpl.yaml b/charts/harbor/harbor/templates/core/core-dpl.yaml index 2ca8b0c5e..8d202498d 100644 --- a/charts/harbor/harbor/templates/core/core-dpl.yaml +++ b/charts/harbor/harbor/templates/core/core-dpl.yaml @@ -17,6 +17,9 @@ spec: labels: {{ include "harbor.matchLabels" . | indent 8 }} component: core +{{- if .Values.core.podLabels }} +{{ toYaml .Values.core.podLabels | indent 8 }} +{{- end }} annotations: checksum/configmap: {{ include (print $.Template.BasePath "/core/core-cm.yaml") . | sha256sum }} checksum/secret: {{ include (print $.Template.BasePath "/core/core-secret.yaml") . | sha256sum }} @@ -42,6 +45,16 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.core.automountServiceAccountToken | default false }} terminationGracePeriodSeconds: 120 +{{- with .Values.core.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . 
| toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: core +{{- end }} +{{- end }} containers: - name: core image: {{ .Values.core.image.repository }}:{{ .Values.core.image.tag }} @@ -117,6 +130,9 @@ spec: name: {{ .Values.registry.credentials.existingSecret }} key: REGISTRY_PASSWD {{- end }} +{{- with .Values.core.extraEnvVars }} +{{- toYaml . | nindent 10 }} +{{- end }} ports: - containerPort: {{ template "harbor.core.containerPort" . }} volumeMounts: diff --git a/charts/harbor/harbor/templates/core/core-secret.yaml b/charts/harbor/harbor/templates/core/core-secret.yaml index 20f835b1d..23b352b47 100644 --- a/charts/harbor/harbor/templates/core/core-secret.yaml +++ b/charts/harbor/harbor/templates/core/core-secret.yaml @@ -25,4 +25,7 @@ data: REGISTRY_CREDENTIAL_PASSWORD: {{ .Values.registry.credentials.password | b64enc | quote }} {{- end }} CSRF_KEY: {{ .Values.core.xsrfKey | default (randAlphaNum 32) | b64enc | quote }} +{{- if .Values.core.configureUserSettings }} + CONFIG_OVERWRITE_JSON: {{ .Values.core.configureUserSettings | b64enc | quote }} +{{- end }} {{- template "harbor.traceJaegerPassword" . }} diff --git a/charts/harbor/harbor/templates/database/database-ss.yaml b/charts/harbor/harbor/templates/database/database-ss.yaml index 733243c71..3b08b07ef 100644 --- a/charts/harbor/harbor/templates/database/database-ss.yaml +++ b/charts/harbor/harbor/templates/database/database-ss.yaml @@ -19,6 +19,9 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: database +{{- if .Values.database.podLabels }} +{{ toYaml .Values.database.podLabels | indent 8 }} +{{- end }} annotations: checksum/secret: {{ include (print $.Template.BasePath "/database/database-secret.yaml") . | sha256sum }} {{- if .Values.database.podAnnotations }} @@ -102,6 +105,9 @@ spec: # more detail refer to https://github.com/goharbor/harbor-helm/issues/756 - name: PGDATA value: "/var/lib/postgresql/data/pgdata" +{{- with .Values.database.internal.extraEnvVars }} +{{- toYaml . | nindent 10 }} +{{- end }} volumeMounts: - name: database-data mountPath: /var/lib/postgresql/data diff --git a/charts/harbor/harbor/templates/exporter/exporter-dpl.yaml b/charts/harbor/harbor/templates/exporter/exporter-dpl.yaml index 5ff36f48a..6d2e1f53a 100644 --- a/charts/harbor/harbor/templates/exporter/exporter-dpl.yaml +++ b/charts/harbor/harbor/templates/exporter/exporter-dpl.yaml @@ -18,7 +18,15 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: exporter +{{- if .Values.exporter.podLabels }} +{{ toYaml .Values.exporter.podLabels | indent 8 }} +{{- end }} annotations: +{{- if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "auto") }} + checksum/tls: {{ include (print $.Template.BasePath "/internal/auto-tls.yaml") . | sha256sum }} +{{- else if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "manual") }} + checksum/tls: {{ include (print $.Template.BasePath "/core/core-tls.yaml") . | sha256sum }} +{{- end }} {{- if .Values.exporter.podAnnotations }} {{ toYaml .Values.exporter.podAnnotations | indent 8 }} {{- end }} @@ -34,6 +42,16 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} automountServiceAccountToken: {{ .Values.exporter.automountServiceAccountToken | default false }} +{{- with .Values.exporter.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range . }} + - {{ . 
| toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: exporter +{{- end }} +{{- end }} containers: - name: exporter image: {{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag }} @@ -74,6 +92,10 @@ spec: {{- if .Values.exporter.resources }} resources: {{ toYaml .Values.exporter.resources | indent 10 }} +{{- end }} +{{- with .Values.exporter.extraEnvVars }} + env: +{{- toYaml . | nindent 10 }} {{- end }} ports: - containerPort: {{ template "harbor.core.containerPort" . }} diff --git a/charts/harbor/harbor/templates/ingress/ingress.yaml b/charts/harbor/harbor/templates/ingress/ingress.yaml index eedd13604..e4c06939c 100644 --- a/charts/harbor/harbor/templates/ingress/ingress.yaml +++ b/charts/harbor/harbor/templates/ingress/ingress.yaml @@ -8,7 +8,6 @@ {{- $_ := set . "v2_path" "/v2/*" -}} {{- $_ := set . "chartrepo_path" "/chartrepo/*" -}} {{- $_ := set . "controller_path" "/c/*" -}} - {{- $_ := set . "notary_path" "/" -}} {{- else if eq .Values.expose.ingress.controller "ncp" }} {{- $_ := set . "portal_path" "/.*" -}} {{- $_ := set . "api_path" "/api/.*" -}} @@ -16,7 +15,6 @@ {{- $_ := set . "v2_path" "/v2/.*" -}} {{- $_ := set . "chartrepo_path" "/chartrepo/.*" -}} {{- $_ := set . "controller_path" "/c/.*" -}} - {{- $_ := set . "notary_path" "/.*" -}} {{- else }} {{- $_ := set . "portal_path" "/" -}} {{- $_ := set . "api_path" "/api/" -}} @@ -24,7 +22,6 @@ {{- $_ := set . "v2_path" "/v2/" -}} {{- $_ := set . "chartrepo_path" "/chartrepo/" -}} {{- $_ := set . "controller_path" "/c/" -}} - {{- $_ := set . "notary_path" "/" -}} {{- end }} --- @@ -145,65 +142,4 @@ spec: host: {{ $ingress.hosts.core }} {{- end }} -{{- if .Values.notary.enabled }} ---- -{{- if semverCompare "<1.14-0" (include "harbor.ingress.kubeVersion" .) }} -apiVersion: extensions/v1beta1 -{{- else if semverCompare "<1.19-0" (include "harbor.ingress.kubeVersion" .) }} -apiVersion: networking.k8s.io/v1beta1 -{{- else }} -apiVersion: networking.k8s.io/v1 -{{- end }} -kind: Ingress -metadata: - name: "{{ template "harbor.ingress-notary" . }}" - labels: -{{ include "harbor.labels" . | indent 4 }} -{{- if $ingress.notary.labels }} -{{ toYaml $ingress.notary.labels | indent 4 }} -{{- end }} - annotations: -{{ toYaml $ingress.annotations | indent 4 }} -{{- if eq .Values.expose.ingress.controller "ncp" }} - ncp/use-regex: "true" - {{- if $tls.enabled }} - ncp/http-redirect: "true" - {{- end }} -{{- end }} -{{- if $ingress.notary.annotations }} -{{ toYaml $ingress.notary.annotations | indent 4 }} -{{- end }} -spec: - {{- if $ingress.className }} - ingressClassName: {{ $ingress.className }} - {{- end }} - {{- if $tls.enabled }} - tls: - - secretName: {{ template "harbor.tlsNotarySecretForIngress" . }} - {{- if $ingress.hosts.notary }} - hosts: - - {{ $ingress.hosts.notary }} - {{- end }} - {{- end }} - rules: - - http: - paths: - - path: {{ .notary_path }} -{{- if semverCompare "<1.19-0" (include "harbor.ingress.kubeVersion" .) }} - backend: - serviceName: {{ template "harbor.notary-server" . }} - servicePort: 4443 -{{- else }} - pathType: Prefix - backend: - service: - name: {{ template "harbor.notary-server" . 
}} - port: - number: 4443 -{{- end -}} - {{- if $ingress.hosts.notary }} - host: {{ $ingress.hosts.notary }} - {{- end }} -{{- end }} - {{- end }} diff --git a/charts/harbor/harbor/templates/ingress/secret.yaml b/charts/harbor/harbor/templates/ingress/secret.yaml index 0d89af99a..41507b3dd 100644 --- a/charts/harbor/harbor/templates/ingress/secret.yaml +++ b/charts/harbor/harbor/templates/ingress/secret.yaml @@ -1,6 +1,6 @@ {{- if eq (include "harbor.autoGenCertForIngress" .) "true" }} {{- $ca := genCA "harbor-ca" 365 }} -{{- $cert := genSignedCert .Values.expose.ingress.hosts.core nil (list .Values.expose.ingress.hosts.core .Values.expose.ingress.hosts.notary) 365 $ca }} +{{- $cert := genSignedCert .Values.expose.ingress.hosts.core nil (list .Values.expose.ingress.hosts.core) 365 $ca }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/harbor/harbor/templates/jobservice/jobservice-dpl.yaml b/charts/harbor/harbor/templates/jobservice/jobservice-dpl.yaml index 32df454b1..32df97db7 100644 --- a/charts/harbor/harbor/templates/jobservice/jobservice-dpl.yaml +++ b/charts/harbor/harbor/templates/jobservice/jobservice-dpl.yaml @@ -22,6 +22,9 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: jobservice +{{- if .Values.jobservice.podLabels }} +{{ toYaml .Values.jobservice.podLabels | indent 8 }} +{{- end }} annotations: checksum/configmap: {{ include (print $.Template.BasePath "/jobservice/jobservice-cm.yaml") . | sha256sum }} checksum/configmap-env: {{ include (print $.Template.BasePath "/jobservice/jobservice-cm-env.yaml") . | sha256sum }} @@ -48,6 +51,16 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.jobservice.automountServiceAccountToken | default false }} terminationGracePeriodSeconds: 120 +{{- with .Values.jobservice.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: jobservice +{{- end }} +{{- end }} containers: - name: jobservice image: {{ .Values.jobservice.image.repository }}:{{ .Values.jobservice.image.tag }} @@ -93,6 +106,9 @@ spec: name: {{ .Values.registry.credentials.existingSecret }} key: REGISTRY_PASSWD {{- end }} +{{- with .Values.jobservice.extraEnvVars }} +{{- toYaml . | nindent 10 }} +{{- end }} envFrom: - configMapRef: name: "{{ template "harbor.jobservice" . }}-env" diff --git a/charts/harbor/harbor/templates/metrics/metrics-svcmon.yaml b/charts/harbor/harbor/templates/metrics/metrics-svcmon.yaml index ad8522974..1122ef01e 100644 --- a/charts/harbor/harbor/templates/metrics/metrics-svcmon.yaml +++ b/charts/harbor/harbor/templates/metrics/metrics-svcmon.yaml @@ -1,4 +1,4 @@ -{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: diff --git a/charts/harbor/harbor/templates/nginx/configmap-https.yaml b/charts/harbor/harbor/templates/nginx/configmap-https.yaml index 74c667e00..56c943a61 100644 --- a/charts/harbor/harbor/templates/nginx/configmap-https.yaml +++ b/charts/harbor/harbor/templates/nginx/configmap-https.yaml @@ -36,12 +36,6 @@ data: server "{{ template "harbor.portal" . }}:{{ template "harbor.portal.servicePort" . }}"; } - {{- if .Values.notary.enabled }} - upstream notary-server { - server {{ template "harbor.notary-server" . 
}}:4443; - } - {{- end }} - log_format timed_combined '[$time_local]:$remote_addr - ' '"$request" $status $body_bytes_sent ' '"$http_referer" "$http_user_agent" ' @@ -54,47 +48,6 @@ data: "" $scheme; } - {{- if .Values.notary.enabled }} - server { - {{- if .Values.ipFamily.ipv4.enabled }} - listen 4443 ssl; - {{- end}} - {{- if .Values.ipFamily.ipv6.enabled}} - listen [::]:4443 ssl; - {{- end }} - server_tokens off; - # ssl - ssl_certificate /etc/nginx/cert/tls.crt; - ssl_certificate_key /etc/nginx/cert/tls.key; - - # recommendations from https://raymii.org/s/tutorials/strong_ssl_security_on_nginx.html - ssl_protocols tlsv1.2; - ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; - ssl_prefer_server_ciphers on; - ssl_session_cache shared:ssl:10m; - - # disable any limits to avoid http 413 for large image uploads - client_max_body_size 0; - - # required to avoid http 411: see issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - proxy_pass http://notary-server/v2/; - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $x_forwarded_proto; - - proxy_buffering off; - proxy_request_buffering off; - - proxy_send_timeout 900; - proxy_read_timeout 900; - } - } - {{- end }} - server { {{- if .Values.ipFamily.ipv4.enabled }} listen 8443 ssl; @@ -109,8 +62,12 @@ data: ssl_certificate_key /etc/nginx/cert/tls.key; # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html - ssl_protocols TLSv1.2; + ssl_protocols TLSv1.2 TLSv1.3; + {{- if .Values.internalTLS.strong_ssl_ciphers }} + ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128; + {{ else }} ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; + {{- end }} ssl_prefer_server_ciphers on; ssl_session_cache shared:SSL:10m; diff --git a/charts/harbor/harbor/templates/nginx/deployment.yaml b/charts/harbor/harbor/templates/nginx/deployment.yaml index bc1de0abf..8290d497b 100644 --- a/charts/harbor/harbor/templates/nginx/deployment.yaml +++ b/charts/harbor/harbor/templates/nginx/deployment.yaml @@ -18,6 +18,9 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: nginx +{{- if .Values.nginx.podLabels }} +{{ toYaml .Values.nginx.podLabels | indent 8 }} +{{- end }} annotations: {{- if not .Values.expose.tls.enabled }} checksum/configmap: {{ include (print $.Template.BasePath "/nginx/configmap-http.yaml") . | sha256sum }} @@ -42,6 +45,16 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} automountServiceAccountToken: {{ .Values.nginx.automountServiceAccountToken | default false }} +{{- with .Values.nginx.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: nginx +{{- end }} +{{- end }} containers: - name: nginx image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}" @@ -69,6 +82,10 @@ spec: {{- if .Values.nginx.resources }} resources: {{ toYaml .Values.nginx.resources | indent 10 }} +{{- end }} +{{- with .Values.nginx.extraEnvVars }} + env: +{{- toYaml . 
| nindent 10 }} {{- end }} ports: - containerPort: 8080 diff --git a/charts/harbor/harbor/templates/nginx/service.yaml b/charts/harbor/harbor/templates/nginx/service.yaml index df4da0944..12021bfd1 100644 --- a/charts/harbor/harbor/templates/nginx/service.yaml +++ b/charts/harbor/harbor/templates/nginx/service.yaml @@ -22,11 +22,6 @@ spec: port: {{ $clusterIP.ports.httpsPort }} targetPort: 8443 {{- end }} - {{- if .Values.notary.enabled }} - - name: notary - port: {{ $clusterIP.ports.notaryPort }} - targetPort: 4443 - {{- end }} {{- else if eq .Values.expose.type "nodePort" }} {{- $nodePort := .Values.expose.nodePort }} name: {{ $nodePort.name }} @@ -49,14 +44,6 @@ spec: nodePort: {{ $nodePort.ports.https.nodePort }} {{- end }} {{- end }} - {{- if .Values.notary.enabled }} - - name: notary - port: {{ $nodePort.ports.notary.port }} - targetPort: 4443 - {{- if $nodePort.ports.notary.nodePort }} - nodePort: {{ $nodePort.ports.notary.nodePort }} - {{- end }} - {{- end }} {{- else if eq .Values.expose.type "loadBalancer" }} {{- $loadBalancer := .Values.expose.loadBalancer }} name: {{ $loadBalancer.name }} @@ -84,11 +71,6 @@ spec: port: {{ $loadBalancer.ports.httpsPort }} targetPort: 8443 {{- end }} - {{- if .Values.notary.enabled }} - - name: notary - port: {{ $loadBalancer.ports.notaryPort }} - targetPort: 4443 - {{- end }} {{- end }} selector: {{ include "harbor.matchLabels" . | indent 4 }} diff --git a/charts/harbor/harbor/templates/notary/notary-secret.yaml b/charts/harbor/harbor/templates/notary/notary-secret.yaml deleted file mode 100644 index 6de63dd8c..000000000 --- a/charts/harbor/harbor/templates/notary/notary-secret.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if and .Values.notary.enabled }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "harbor.notary-server" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} - component: notary -type: Opaque -data: - {{- if not .Values.notary.secretName }} - {{- $ca := genCA "harbor-notary-ca" 365 }} - {{- $cert := genSignedCert (include "harbor.notary-signer" .) nil (list (include "harbor.notary-signer" .)) 365 $ca }} - ca.crt: {{ $ca.Cert | b64enc | quote }} - tls.crt: {{ $cert.Cert | b64enc | quote }} - tls.key: {{ $cert.Key | b64enc | quote }} - {{- end }} - server.json: {{ tpl (.Files.Get "conf/notary-server.json") . | b64enc }} - signer.json: {{ tpl (.Files.Get "conf/notary-signer.json") . | b64enc }} - NOTARY_SERVER_DB_URL: {{ include "harbor.database.notaryServer" . | b64enc }} - NOTARY_SIGNER_DB_URL: {{ include "harbor.database.notarySigner" . | b64enc }} -{{- end }} diff --git a/charts/harbor/harbor/templates/notary/notary-server.yaml b/charts/harbor/harbor/templates/notary/notary-server.yaml deleted file mode 100644 index 64cfd293f..000000000 --- a/charts/harbor/harbor/templates/notary/notary-server.yaml +++ /dev/null @@ -1,111 +0,0 @@ -{{ if .Values.notary.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "harbor.notary-server" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} - component: notary-server -spec: - replicas: {{ .Values.notary.server.replicas }} - selector: - matchLabels: -{{ include "harbor.matchLabels" . | indent 6 }} - component: notary-server - template: - metadata: - labels: -{{ include "harbor.labels" . | indent 8 }} - component: notary-server - annotations: - checksum/secret: {{ include (print $.Template.BasePath "/notary/notary-secret.yaml") . | sha256sum }} - checksum/secret-core: {{ include (print $.Template.BasePath "/core/core-secret.yaml") . 
| sha256sum }} -{{- if .Values.notary.server.podAnnotations }} -{{ toYaml .Values.notary.server.podAnnotations | indent 8 }} -{{- end }} - spec: - securityContext: - runAsUser: 10000 - fsGroup: 10000 -{{- if .Values.notary.server.serviceAccountName }} - serviceAccountName: {{ .Values.notary.server.serviceAccountName }} -{{- end -}} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - automountServiceAccountToken: {{ .Values.notary.server.automountServiceAccountToken | default false }} - containers: - - name: notary-server - image: {{ .Values.notary.server.image.repository }}:{{ .Values.notary.server.image.tag }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - livenessProbe: - httpGet: - path: /_notary_server/health - scheme: "HTTP" - port: 4443 - initialDelaySeconds: 300 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /_notary_server/health - scheme: "HTTP" - port: 4443 - initialDelaySeconds: 20 - periodSeconds: 10 -{{- if .Values.notary.server.resources }} - resources: -{{ toYaml .Values.notary.server.resources | indent 10 }} -{{- end }} - env: - - name: MIGRATIONS_PATH - value: migrations/server/postgresql - - name: DB_URL - valueFrom: - secretKeyRef: - name: {{ template "harbor.notary-server" . }} - key: NOTARY_SERVER_DB_URL - volumeMounts: - - name: config - mountPath: /etc/notary/server-config.postgres.json - subPath: server.json - - name: token-service-certificate - mountPath: /root.crt - subPath: tls.crt - - name: signer-certificate - mountPath: /etc/ssl/notary/ca.crt - subPath: ca.crt - volumes: - - name: config - secret: - secretName: "{{ template "harbor.notary-server" . }}" - - name: token-service-certificate - secret: - {{- if .Values.core.secretName }} - secretName: {{ .Values.core.secretName }} - {{- else }} - secretName: {{ template "harbor.core" . }} - {{- end }} - - name: signer-certificate - secret: - {{- if .Values.notary.secretName }} - secretName: {{ .Values.notary.secretName }} - {{- else }} - secretName: {{ template "harbor.notary-server" . }} - {{- end }} - {{- with .Values.notary.server.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.server.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.server.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} - {{- end }} - {{- if .Values.notary.server.priorityClassName }} - priorityClassName: {{ .Values.notary.server.priorityClassName }} - {{- end }} -{{ end }} diff --git a/charts/harbor/harbor/templates/notary/notary-signer.yaml b/charts/harbor/harbor/templates/notary/notary-signer.yaml deleted file mode 100644 index d94e4909b..000000000 --- a/charts/harbor/harbor/templates/notary/notary-signer.yaml +++ /dev/null @@ -1,105 +0,0 @@ -{{ if .Values.notary.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "harbor.notary-signer" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} - component: notary-signer -spec: - replicas: {{ .Values.notary.signer.replicas }} - selector: - matchLabels: -{{ include "harbor.matchLabels" . | indent 6 }} - component: notary-signer - template: - metadata: - labels: -{{ include "harbor.labels" . | indent 8 }} - component: notary-signer - annotations: - checksum/secret: {{ include (print $.Template.BasePath "/notary/notary-secret.yaml") . 
| sha256sum }} -{{- if .Values.notary.signer.podAnnotations }} -{{ toYaml .Values.notary.signer.podAnnotations | indent 8 }} -{{- end }} - spec: - securityContext: - runAsUser: 10000 - fsGroup: 10000 -{{- if .Values.notary.signer.serviceAccountName }} - serviceAccountName: {{ .Values.notary.signer.serviceAccountName }} -{{- end -}} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - automountServiceAccountToken: {{ .Values.notary.signer.automountServiceAccountToken | default false }} - containers: - - name: notary-signer - image: {{ .Values.notary.signer.image.repository }}:{{ .Values.notary.signer.image.tag }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - livenessProbe: - httpGet: - path: / - scheme: "HTTPS" - port: 7899 - initialDelaySeconds: 300 - periodSeconds: 10 - readinessProbe: - httpGet: - path: / - scheme: "HTTPS" - port: 7899 - initialDelaySeconds: 20 - periodSeconds: 10 -{{- if .Values.notary.signer.resources }} - resources: -{{ toYaml .Values.notary.signer.resources | indent 10 }} -{{- end }} - env: - - name: MIGRATIONS_PATH - value: migrations/signer/postgresql - - name: DB_URL - valueFrom: - secretKeyRef: - name: {{ template "harbor.notary-server" . }} - key: NOTARY_SIGNER_DB_URL - - name: NOTARY_SIGNER_DEFAULTALIAS - value: defaultalias - volumeMounts: - - name: config - mountPath: /etc/notary/signer-config.postgres.json - subPath: signer.json - - name: signer-certificate - mountPath: /etc/ssl/notary/tls.crt - subPath: tls.crt - - name: signer-certificate - mountPath: /etc/ssl/notary/tls.key - subPath: tls.key - volumes: - - name: config - secret: - secretName: "{{ template "harbor.notary-server" . }}" - - name: signer-certificate - secret: - {{- if .Values.notary.secretName }} - secretName: {{ .Values.notary.secretName }} - {{- else }} - secretName: {{ template "harbor.notary-server" . }} - {{- end }} - {{- with .Values.notary.signer.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.signer.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.notary.signer.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} - {{- end }} - {{- if .Values.notary.signer.priorityClassName }} - priorityClassName: {{ .Values.notary.signer.priorityClassName }} - {{- end }} -{{ end }} diff --git a/charts/harbor/harbor/templates/notary/notary-svc.yaml b/charts/harbor/harbor/templates/notary/notary-svc.yaml deleted file mode 100644 index b6aa42d89..000000000 --- a/charts/harbor/harbor/templates/notary/notary-svc.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{ if .Values.notary.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "harbor.notary-server" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} -{{- with .Values.notary.serviceAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} -{{- end }} -spec: -{{- if or (eq .Values.expose.ingress.controller "gce") (eq .Values.expose.ingress.controller "alb") (eq .Values.expose.ingress.controller "f5-bigip") }} - type: NodePort -{{- end }} - ports: - - port: 4443 - selector: -{{ include "harbor.matchLabels" . | indent 4 }} - component: notary-server - ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "harbor.notary-signer" . }} - labels: -{{ include "harbor.labels" . | indent 4 }} -spec: - ports: - - port: 7899 - selector: -{{ include "harbor.matchLabels" . 
| indent 4 }} - component: notary-signer -{{ end }} diff --git a/charts/harbor/harbor/templates/portal/configmap.yaml b/charts/harbor/harbor/templates/portal/configmap.yaml index 1cea8ab63..7b2118e72 100644 --- a/charts/harbor/harbor/templates/portal/configmap.yaml +++ b/charts/harbor/harbor/templates/portal/configmap.yaml @@ -30,8 +30,12 @@ data: ssl_certificate_key /etc/harbor/ssl/portal/tls.key; # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html - ssl_protocols TLSv1.2; + ssl_protocols TLSv1.2 TLSv1.3; + {{- if .Values.internalTLS.strong_ssl_ciphers }} + ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128; + {{ else }} ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; + {{- end }} ssl_prefer_server_ciphers on; ssl_session_cache shared:SSL:10m; {{- else }} diff --git a/charts/harbor/harbor/templates/portal/deployment.yaml b/charts/harbor/harbor/templates/portal/deployment.yaml index d3469deef..959a3fd7b 100644 --- a/charts/harbor/harbor/templates/portal/deployment.yaml +++ b/charts/harbor/harbor/templates/portal/deployment.yaml @@ -17,6 +17,9 @@ spec: labels: {{ include "harbor.matchLabels" . | indent 8 }} component: portal +{{- if .Values.portal.podLabels }} +{{ toYaml .Values.portal.podLabels | indent 8 }} +{{- end }} annotations: {{- if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "auto") }} checksum/tls: {{ include (print $.Template.BasePath "/internal/auto-tls.yaml") . | sha256sum }} @@ -39,6 +42,16 @@ spec: serviceAccountName: {{ .Values.portal.serviceAccountName }} {{- end }} automountServiceAccountToken: {{ .Values.portal.automountServiceAccountToken | default false }} +{{- with .Values.portal.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: portal +{{- end }} +{{- end }} containers: - name: portal image: {{ .Values.portal.image.repository }}:{{ .Values.portal.image.tag }} @@ -46,6 +59,10 @@ spec: {{- if .Values.portal.resources }} resources: {{ toYaml .Values.portal.resources | indent 10 }} +{{- end }} +{{- with .Values.portal.extraEnvVars }} + env: +{{- toYaml . | nindent 10 }} {{- end }} livenessProbe: httpGet: diff --git a/charts/harbor/harbor/templates/redis/statefulset.yaml b/charts/harbor/harbor/templates/redis/statefulset.yaml index 74b7581fd..371b0fd5a 100644 --- a/charts/harbor/harbor/templates/redis/statefulset.yaml +++ b/charts/harbor/harbor/templates/redis/statefulset.yaml @@ -19,6 +19,9 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: redis +{{- if .Values.redis.podLabels }} +{{ toYaml .Values.redis.podLabels | indent 8 }} +{{- end }} {{- if .Values.redis.podAnnotations }} annotations: {{ toYaml .Values.redis.podAnnotations | indent 8 }} @@ -53,6 +56,10 @@ spec: {{- if .Values.redis.internal.resources }} resources: {{ toYaml .Values.redis.internal.resources | indent 10 }} +{{- end }} +{{- with .Values.redis.internal.extraEnvVars }} + env: +{{- toYaml . | nindent 10 }} {{- end }} volumeMounts: - name: data diff --git a/charts/harbor/harbor/templates/registry/registry-dpl.yaml b/charts/harbor/harbor/templates/registry/registry-dpl.yaml index 118a165d4..fddba9fa8 100644 --- a/charts/harbor/harbor/templates/registry/registry-dpl.yaml +++ b/charts/harbor/harbor/templates/registry/registry-dpl.yaml @@ -24,6 +24,9 @@ spec: labels: {{ include "harbor.labels" . 
| indent 8 }} component: registry +{{- if .Values.registry.podLabels }} +{{ toYaml .Values.registry.podLabels | indent 8 }} +{{- end }} annotations: checksum/configmap: {{ include (print $.Template.BasePath "/registry/registry-cm.yaml") . | sha256sum }} checksum/secret: {{ include (print $.Template.BasePath "/registry/registry-secret.yaml") . | sha256sum }} @@ -51,6 +54,16 @@ spec: {{- end }} automountServiceAccountToken: {{ .Values.registry.automountServiceAccountToken | default false }} terminationGracePeriodSeconds: 120 +{{- with .Values.registry.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: registry +{{- end }} +{{- end }} containers: - name: registry image: {{ .Values.registry.registry.image.repository }}:{{ .Values.registry.registry.image.tag }} @@ -107,6 +120,9 @@ spec: name: {{ .Values.persistence.imageChartStorage.azure.existingSecret }} key: AZURE_STORAGE_ACCESS_KEY {{- end }} +{{- with .Values.registry.registry.extraEnvVars }} +{{- toYaml . | nindent 8 }} +{{- end }} ports: - containerPort: {{ template "harbor.registry.containerPort" . }} - containerPort: 5001 @@ -172,6 +188,10 @@ spec: name: "{{ template "harbor.registry" . }}" - secretRef: name: "{{ template "harbor.registryCtl" . }}" + {{- if .Values.persistence.imageChartStorage.s3.existingSecret }} + - secretRef: + name: {{ .Values.persistence.imageChartStorage.s3.existingSecret }} + {{- end }} env: - name: CORE_SECRET valueFrom: @@ -215,6 +235,9 @@ spec: name: {{ .Values.persistence.imageChartStorage.azure.existingSecret }} key: AZURE_STORAGE_ACCESS_KEY {{- end }} +{{- with .Values.registry.controller.extraEnvVars }} +{{- toYaml . | nindent 8 }} +{{- end }} ports: - containerPort: {{ template "harbor.registryctl.containerPort" . }} volumeMounts: diff --git a/charts/harbor/harbor/templates/trivy/trivy-sts.yaml b/charts/harbor/harbor/templates/trivy/trivy-sts.yaml index 37b19ac2d..aba23c9e8 100644 --- a/charts/harbor/harbor/templates/trivy/trivy-sts.yaml +++ b/charts/harbor/harbor/templates/trivy/trivy-sts.yaml @@ -19,6 +19,9 @@ spec: labels: {{ include "harbor.labels" . | indent 8 }} component: trivy +{{- if .Values.trivy.podLabels }} +{{ toYaml .Values.trivy.podLabels | indent 8 }} +{{- end }} annotations: checksum/secret: {{ include (print $.Template.BasePath "/trivy/trivy-secret.yaml") . | sha256sum }} {{- if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "auto") }} @@ -41,6 +44,16 @@ spec: runAsUser: 10000 fsGroup: 10000 automountServiceAccountToken: {{ .Values.trivy.automountServiceAccountToken | default false }} +{{- with .Values.trivy.topologySpreadConstraints}} + topologySpreadConstraints: +{{- range . }} + - {{ . | toYaml | indent 8 | trim }} + labelSelector: + matchLabels: +{{ include "harbor.matchLabels" $ | indent 12 }} + component: trivy +{{- end }} +{{- end }} containers: - name: trivy image: {{ .Values.trivy.image.repository }}:{{ .Values.trivy.image.tag }} @@ -111,6 +124,9 @@ spec: secretKeyRef: name: {{ template "harbor.trivy" . }} key: redisURL +{{- with .Values.trivy.extraEnvVars }} +{{- toYaml . | nindent 12 }} +{{- end }} ports: - name: api-server containerPort: {{ template "harbor.trivy.containerPort" . 
}} diff --git a/charts/harbor/harbor/values.yaml b/charts/harbor/harbor/values.yaml index 11d2c4a57..1160c94eb 100644 --- a/charts/harbor/harbor/values.yaml +++ b/charts/harbor/harbor/values.yaml @@ -26,15 +26,9 @@ expose: # "tls.crt" - the certificate # "tls.key" - the private key secretName: "" - # The name of secret which contains keys named: - # "tls.crt" - the certificate - # "tls.key" - the private key - # Only needed when the "expose.type" is "ingress". - notarySecretName: "" ingress: hosts: core: core.harbor.domain - notary: notary.harbor.domain # set to the type of ingress controller if it has specific requirements. # leave as `default` for most ingress controllers. # set to `gce` if using the GCE ingress controller @@ -52,11 +46,6 @@ expose: ingress.kubernetes.io/proxy-body-size: "0" nginx.ingress.kubernetes.io/ssl-redirect: "true" nginx.ingress.kubernetes.io/proxy-body-size: "0" - notary: - # notary ingress-specific annotations - annotations: {} - # notary ingress-specific labels - labels: {} harbor: # harbor ingress-specific annotations annotations: {} @@ -71,10 +60,6 @@ expose: # The service port Harbor listens on when serving HTTP httpPort: 80 # The service port Harbor listens on when serving HTTPS - httpsPort: 443 - # The service port Notary listens on. Only needed when notary.enabled - # is set to true - notaryPort: 4443 nodePort: # The name of NodePort service name: harbor @@ -89,12 +74,6 @@ expose: port: 443 # The node port Harbor listens on when serving HTTPS nodePort: 30003 - # Only needed when notary.enabled is set to true - notary: - # The service port Notary listens on - port: 4443 - # The node port Notary listens on - nodePort: 30004 loadBalancer: # The name of LoadBalancer service name: harbor @@ -105,15 +84,12 @@ expose: httpPort: 80 # The service port Harbor listens on when serving HTTPS httpsPort: 443 - # The service port Notary listens on. Only needed when notary.enabled - # is set to true - notaryPort: 4443 annotations: {} sourceRanges: [] # The external URL for Harbor core service. It is used to # 1) populate the docker/helm commands showed on portal -# 2) populate the token service URL returned to docker/notary client +# 2) populate the token service URL returned to docker client # # Format: protocol://domain[:port]. Usually: # 1) if "expose.type" is "ingress", the "domain" should be @@ -127,10 +103,12 @@ expose: externalURL: https://core.harbor.domain # The internal TLS used for harbor components secure communicating. In order to enable https -# in each components tls cert files need to provided in advance. +# in each component tls cert files need to provided in advance. internalTLS: # If internal TLS enabled enabled: false + # enable strong ssl ciphers (default: false) + strong_ssl_ciphers: false # There are three ways to provide tls # 1) "auto" will generate cert automatically # 2) "manual" need provide cert file manually in following value @@ -249,14 +227,14 @@ persistence: annotations: {} # Define which storage backend is used for registry to store # images and charts. Refer to - # https://github.com/docker/distribution/blob/master/docs/configuration.md#storage + # https://github.com/distribution/distribution/blob/main/docs/configuration.md#storage # for the detail. imageChartStorage: # Specify whether to disable `redirect` for images and chart storage, for # backends which not supported it (such as using minio for `s3` storage type), please disable # it. To disable redirects, simply set `disableredirect` to `true` instead. 
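The `internalTLS` block above gains a `strong_ssl_ciphers` switch in this release: when enabled, the nginx and portal configs swap the legacy cipher string for an ECDHE/DHE AES-GCM list (the `ssl_protocols` line now allows TLS 1.2 and 1.3 either way). A minimal override, assuming auto-generated certificates are acceptable:

```yaml
# Hypothetical override: in-cluster TLS with the hardened cipher list for nginx and portal.
internalTLS:
  enabled: true
  certSource: auto
  strong_ssl_ciphers: true
```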
# Refer to - # https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect + # https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect # for the detail. disableredirect: false # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate. @@ -284,7 +262,7 @@ persistence: encodedkey: base64-encoded-json-key-file #rootdirectory: /gcs/object/name/prefix #chunksize: "5242880" - # To use existing secret, the key must be gcs-key.json + # To use existing secret, the key must be GCS_KEY_DATA existingSecret: "" useWorkloadIdentity: false s3: @@ -400,7 +378,7 @@ enableMigrateHelmHook: false nginx: image: repository: goharbor/nginx-photon - tag: v2.8.4 + tag: v2.9.0 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -411,18 +389,27 @@ nginx: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] nodeSelector: {} tolerations: [] affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} ## The priority class to run the pod as priorityClassName: portal: image: repository: goharbor/harbor-portal - tag: v2.8.4 + tag: v2.9.0 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -433,18 +420,27 @@ portal: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] nodeSelector: {} tolerations: [] affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} ## The priority class to run the pod as priorityClassName: core: image: repository: goharbor/harbor-core - tag: v2.8.4 + tag: v2.9.0 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -459,18 +455,37 @@ core: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] nodeSelector: {} tolerations: [] affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} ## Additional service annotations serviceAnnotations: {} + ## User settings configuration json string + configureUserSettings: + # The provider for updating project quota(usage), there are 2 options, redis or db. + # By default it is implemented by db but you can configure it to redis which + # can improve the performance of high concurrent pushing to the same project, + # and reduce the database connections spike and occupies. + # Using redis will bring up some delay for quota usage updation for display, so only + # suggest switch provider to redis if you were ran into the db connections spike around + # the scenario of high concurrent pushing to same project, no improvment for other scenes. 
+ quotaUpdateProvider: db # Or redis # Secret is used when core server communicates with other components. # If a secret key is not specified, Helm will generate one. # Must be a string of 16 chars. secret: "" - # Fill the name of a kubernetes secret if you want to use your own + # Fill in the name of a kubernetes secret if you want to use your own # TLS certificate and private key for token encryption/decryption. # The secret must contain keys named: # "tls.key" - the private key @@ -497,7 +512,7 @@ core: jobservice: image: repository: goharbor/harbor-jobservice - tag: v2.8.4 + tag: v2.9.0 replicas: 1 revisionHistoryLimit: 10 # set the service account to be used, default if left empty @@ -525,11 +540,20 @@ jobservice: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] nodeSelector: {} tolerations: [] affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} # Secret is used when job service communicates with other components. # If a secret key is not specified, Helm will generate one. # Must be a string of 16 chars. @@ -545,32 +569,42 @@ registry: registry: image: repository: goharbor/registry-photon - tag: v2.8.4 + tag: v2.9.0 # resources: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] controller: image: repository: goharbor/harbor-registryctl - tag: v2.8.4 + tag: v2.9.0 # resources: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] replicas: 1 revisionHistoryLimit: 10 nodeSelector: {} tolerations: [] affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} ## The priority class to run the pod as priorityClassName: # Secret is used to secure the upload state from client # and registry storage backend. - # See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http + # See: https://github.com/distribution/distribution/blob/main/docs/configuration.md#http # If a secret key is not specified, Helm will generate one. # Must be a string of 16 chars. 
secret: "" @@ -610,7 +644,7 @@ trivy: # repository the repository for Trivy adapter image repository: goharbor/trivy-adapter-photon # tag the tag for Trivy adapter image - tag: v2.8.4 + tag: v2.9.0 # set the service account to be used, default if left empty serviceAccountName: "" # mount the service account token @@ -668,66 +702,23 @@ trivy: limits: cpu: 1 memory: 1Gi + extraEnvVars: [] nodeSelector: {} tolerations: [] affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} ## The priority class to run the pod as priorityClassName: -notary: - enabled: true - server: - # set the service account to be used, default if left empty - serviceAccountName: "" - # mount the service account token - automountServiceAccountToken: false - image: - repository: goharbor/notary-server-photon - tag: v2.8.4 - replicas: 1 - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - nodeSelector: {} - tolerations: [] - affinity: {} - ## Additional deployment annotations - podAnnotations: {} - ## The priority class to run the pod as - priorityClassName: - ## Additional service annotations - serviceAnnotations: {} - signer: - # set the service account to be used, default if left empty - serviceAccountName: "" - # mount the service account token - automountServiceAccountToken: false - image: - repository: goharbor/notary-signer-photon - tag: v2.8.4 - replicas: 1 - # resources: - # requests: - # memory: 256Mi - # cpu: 100m - nodeSelector: {} - tolerations: [] - affinity: {} - ## Additional deployment annotations - podAnnotations: {} - ## The priority class to run the pod as - priorityClassName: - # Fill the name of a kubernetes secret if you want to use your own - # TLS certificate authority, certificate and private key for notary - # communications. - # The secret must contain keys named ca.crt, tls.crt and tls.key that - # contain the CA, certificate and private key. - # They will be generated if not set. 
- secretName: "" - database: # if external database is used, set "type" to "external" # and fill the connection information in "external" section @@ -739,7 +730,7 @@ database: automountServiceAccountToken: false image: repository: goharbor/harbor-db - tag: v2.8.4 + tag: v2.9.0 # The initial superuser password for internal database password: "changeit" # The size limit for Shared memory, pgSQL use it for shared_buffer @@ -756,6 +747,7 @@ database: # The timeout used in readinessProbe; 1 to 5 seconds readinessProbe: timeoutSeconds: 1 + extraEnvVars: [] nodeSelector: {} tolerations: [] affinity: {} @@ -778,8 +770,6 @@ database: username: "user" password: "password" coreDatabase: "registry" - notaryServerDatabase: "notary_server" - notarySignerDatabase: "notary_signer" # if using existing secret, the key must be "password" existingSecret: "" # "disable" - No SSL @@ -799,6 +789,8 @@ database: maxOpenConns: 900 ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} redis: # if external Redis is used, set "type" to "external" @@ -811,11 +803,12 @@ redis: automountServiceAccountToken: false image: repository: goharbor/redis-photon - tag: v2.8.4 + tag: v2.9.0 # resources: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] nodeSelector: {} tolerations: [] affinity: {} @@ -855,6 +848,8 @@ redis: existingSecret: "" ## Additional deployment annotations podAnnotations: {} + ## Additional deployment labels + podLabels: {} exporter: replicas: 1 @@ -863,16 +858,25 @@ exporter: # requests: # memory: 256Mi # cpu: 100m + extraEnvVars: [] podAnnotations: {} + ## Additional deployment labels + podLabels: {} serviceAccountName: "" # mount the service account token automountServiceAccountToken: false image: repository: goharbor/harbor-exporter - tag: v2.8.4 + tag: v2.9.0 nodeSelector: {} tolerations: [] affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule cacheDuration: 23 cacheCleanInterval: 14400 ## The priority class to run the pod as @@ -894,7 +898,7 @@ metrics: port: 8001 ## Create prometheus serviceMonitor to scrape harbor metrics. ## This requires the monitoring.coreos.com/v1 CRD. 
Please see - ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md ## serviceMonitor: enabled: false diff --git a/charts/koor-tech/koor-operator/Chart.lock b/charts/koor-tech/koor-operator/Chart.lock index 248ebeb36..0567501a2 100644 --- a/charts/koor-tech/koor-operator/Chart.lock +++ b/charts/koor-tech/koor-operator/Chart.lock @@ -3,4 +3,4 @@ dependencies: repository: https://charts.jetstack.io version: v1.12.3 digest: sha256:3110ca458f8a903dc025408701614af03df859bf827824478ed68c785b0e6209 -generated: "2023-08-29T05:47:05.106503476Z" +generated: "2023-08-30T06:44:07.371307817Z" diff --git a/charts/koor-tech/koor-operator/Chart.yaml b/charts/koor-tech/koor-operator/Chart.yaml index 0c232a661..eec7b2b8b 100644 --- a/charts/koor-tech/koor-operator/Chart.yaml +++ b/charts/koor-tech/koor-operator/Chart.yaml @@ -14,7 +14,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.19.0' catalog.cattle.io/release-name: koor-operator apiVersion: v2 -appVersion: v0.3.5 +appVersion: v0.3.6 dependencies: - alias: certmanager condition: certmanager.enabled @@ -33,4 +33,4 @@ name: koor-operator sources: - https://github.com/koor-tech/koor-operator/ type: application -version: 0.3.5 +version: 0.3.6 diff --git a/charts/koor-tech/koor-operator/README.md b/charts/koor-tech/koor-operator/README.md index b1fecff31..eebfaa460 100644 --- a/charts/koor-tech/koor-operator/README.md +++ b/charts/koor-tech/koor-operator/README.md @@ -52,7 +52,7 @@ The following table lists the configurable parameters of the rook-operator chart | `controllerManager.manager.args` | Operator args | `["--health-probe-bind-address=:8081","--metrics-bind-address=127.0.0.1:8080","--leader-elect"]` | | `controllerManager.manager.containerSecurityContext` | Operator container security context | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}` | | `controllerManager.manager.image.repository` | Operator image repository | `"docker.io/koorinc/koor-operator"` | -| `controllerManager.manager.image.tag` | Operator image tag | `"v0.3.5"` | +| `controllerManager.manager.image.tag` | Operator image tag | `"v0.3.6"` | | `controllerManager.manager.resources` | Operator container resources | `{"limits":{"cpu":"500m","memory":"512Mi"},"requests":{"cpu":"10m","memory":"128Mi"}}` | | `controllerManager.replicas` | | `1` | | `koorCluster.spec.dashboardEnabled` | Enable the Ceph MGR dashboard. | `true` | diff --git a/charts/koor-tech/koor-operator/templates/koorcluster-job.yaml b/charts/koor-tech/koor-operator/templates/koorcluster-job.yaml index 3d5e12b2f..cfb3ca5bd 100644 --- a/charts/koor-tech/koor-operator/templates/koorcluster-job.yaml +++ b/charts/koor-tech/koor-operator/templates/koorcluster-job.yaml @@ -8,6 +8,7 @@ metadata: {{- include "koor-operator.labels" . | nindent 4 }} spec: backoffLimit: 20 + ttlSecondsAfterFinished: 0 template: metadata: name: {{ include "koor-operator.jobName" . 
}} diff --git a/charts/koor-tech/koor-operator/values.yaml b/charts/koor-tech/koor-operator/values.yaml index 930cfc193..47e31e11f 100644 --- a/charts/koor-tech/koor-operator/values.yaml +++ b/charts/koor-tech/koor-operator/values.yaml @@ -49,7 +49,7 @@ controllerManager: # -- Operator image repository repository: docker.io/koorinc/koor-operator # -- Operator image tag - tag: v0.3.5 + tag: v0.3.6 # -- Operator container resources resources: limits: diff --git a/charts/kuma/kuma/Chart.yaml b/charts/kuma/kuma/Chart.yaml index d5ba4ccf0..cd1b29d8b 100644 --- a/charts/kuma/kuma/Chart.yaml +++ b/charts/kuma/kuma/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/namespace: kuma-system catalog.cattle.io/release-name: kuma apiVersion: v2 -appVersion: 2.3.2 +appVersion: 2.4.0 description: A Helm chart for the Kuma Control Plane home: https://github.com/kumahq/kuma icon: https://kuma.io/assets/images/brand/kuma-logo-new.svg @@ -20,4 +20,4 @@ maintainers: name: nickolaev name: kuma type: application -version: 2.3.2 +version: 2.4.0 diff --git a/charts/kuma/kuma/README.md b/charts/kuma/kuma/README.md index 938ded5a5..0d2f33a77 100644 --- a/charts/kuma/kuma/README.md +++ b/charts/kuma/kuma/README.md @@ -2,7 +2,7 @@ A Helm chart for the Kuma Control Plane -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 2.3.2](https://img.shields.io/badge/Version-2.3.2-informational?style=flat-square) ![AppVersion: 2.3.2](https://img.shields.io/badge/AppVersion-2.3.2-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 2.4.0](https://img.shields.io/badge/Version-2.4.0-informational?style=flat-square) ![AppVersion: 2.4.0](https://img.shields.io/badge/AppVersion-2.4.0-informational?style=flat-square) **Homepage:** diff --git a/charts/kuma/kuma/crds/kuma.io_circuitbreakers.yaml b/charts/kuma/kuma/crds/kuma.io_circuitbreakers.yaml index 196b7c1db..8a0af998e 100644 --- a/charts/kuma/kuma/crds/kuma.io_circuitbreakers.yaml +++ b/charts/kuma/kuma/crds/kuma.io_circuitbreakers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: circuitbreakers.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_containerpatches.yaml b/charts/kuma/kuma/crds/kuma.io_containerpatches.yaml index 20849b10b..5fbde85cf 100644 --- a/charts/kuma/kuma/crds/kuma.io_containerpatches.yaml +++ b/charts/kuma/kuma/crds/kuma.io_containerpatches.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: containerpatches.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_dataplaneinsights.yaml b/charts/kuma/kuma/crds/kuma.io_dataplaneinsights.yaml index 7e892d597..79a541f21 100644 --- a/charts/kuma/kuma/crds/kuma.io_dataplaneinsights.yaml +++ b/charts/kuma/kuma/crds/kuma.io_dataplaneinsights.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: dataplaneinsights.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_dataplanes.yaml b/charts/kuma/kuma/crds/kuma.io_dataplanes.yaml 
index 82cfefe31..1f0088638 100644 --- a/charts/kuma/kuma/crds/kuma.io_dataplanes.yaml +++ b/charts/kuma/kuma/crds/kuma.io_dataplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: dataplanes.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_externalservices.yaml b/charts/kuma/kuma/crds/kuma.io_externalservices.yaml index eed56190b..02be62004 100644 --- a/charts/kuma/kuma/crds/kuma.io_externalservices.yaml +++ b/charts/kuma/kuma/crds/kuma.io_externalservices.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: externalservices.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_faultinjections.yaml b/charts/kuma/kuma/crds/kuma.io_faultinjections.yaml index ba4b468d5..5eeef6418 100644 --- a/charts/kuma/kuma/crds/kuma.io_faultinjections.yaml +++ b/charts/kuma/kuma/crds/kuma.io_faultinjections.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: faultinjections.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_healthchecks.yaml b/charts/kuma/kuma/crds/kuma.io_healthchecks.yaml index ca183c9b7..c138c08e7 100644 --- a/charts/kuma/kuma/crds/kuma.io_healthchecks.yaml +++ b/charts/kuma/kuma/crds/kuma.io_healthchecks.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: healthchecks.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_meshaccesslogs.yaml b/charts/kuma/kuma/crds/kuma.io_meshaccesslogs.yaml index a38c61452..411c1bb2c 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshaccesslogs.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshaccesslogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshaccesslogs.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshaccesslog scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -97,6 +104,11 @@ spec: type: string type: object type: array + body: + description: Body is a raw string or an OTLP any + value as described at https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-body + It can contain placeholders available on https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#command-operators + x-kubernetes-preserve-unknown-fields: true endpoint: description: Endpoint of OpenTelemetry collector. An empty port defaults to 4317. 
@@ -160,6 +172,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -194,6 +207,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -275,6 +289,11 @@ spec: type: string type: object type: array + body: + description: Body is a raw string or an OTLP any + value as described at https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-body + It can contain placeholders available on https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#command-operators + x-kubernetes-preserve-unknown-fields: true endpoint: description: Endpoint of OpenTelemetry collector. An empty port defaults to 4317. @@ -338,6 +357,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -368,3 +388,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshcircuitbreakers.yaml b/charts/kuma/kuma/crds/kuma.io_meshcircuitbreakers.yaml index 6cf06361d..ffae58e55 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshcircuitbreakers.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshcircuitbreakers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshcircuitbreakers.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshcircuitbreaker scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -301,6 +308,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -335,6 +343,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -619,6 +628,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -649,3 +659,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshes.yaml b/charts/kuma/kuma/crds/kuma.io_meshes.yaml index 8e5f84539..7e1848086 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshes.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshes.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_meshfaultinjections.yaml b/charts/kuma/kuma/crds/kuma.io_meshfaultinjections.yaml index b8f55fbb2..be0a3a7ca 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshfaultinjections.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshfaultinjections.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshfaultinjections.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshfaultinjection scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name 
+ name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -127,6 +134,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -161,6 +169,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -186,3 +195,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshgatewayinstances.yaml b/charts/kuma/kuma/crds/kuma.io_meshgatewayinstances.yaml index 4b2958a61..b0056e5ad 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshgatewayinstances.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshgatewayinstances.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshgatewayinstances.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_meshgatewayroutes.yaml b/charts/kuma/kuma/crds/kuma.io_meshgatewayroutes.yaml index 032cffecb..81ffb9b48 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshgatewayroutes.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshgatewayroutes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshgatewayroutes.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_meshgateways.yaml b/charts/kuma/kuma/crds/kuma.io_meshgateways.yaml index 98f98f574..76eba91ac 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshgateways.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshgateways.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshgateways.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_meshhealthchecks.yaml b/charts/kuma/kuma/crds/kuma.io_meshhealthchecks.yaml index 1ce431463..f97352a7d 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshhealthchecks.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshhealthchecks.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshhealthchecks.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshhealthcheck scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -45,6 +52,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -270,6 +278,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -300,3 +309,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshhttproutes.yaml b/charts/kuma/kuma/crds/kuma.io_meshhttproutes.yaml index d75796690..23e575e7e 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshhttproutes.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshhttproutes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshhttproutes.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshhttproute scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -45,6 +52,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -88,6 +96,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -181,6 +190,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -473,6 +483,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -499,3 +510,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshinsights.yaml b/charts/kuma/kuma/crds/kuma.io_meshinsights.yaml index 5391c4b88..f9c307168 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshinsights.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshinsights.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshinsights.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_meshloadbalancingstrategies.yaml b/charts/kuma/kuma/crds/kuma.io_meshloadbalancingstrategies.yaml index d4861794d..0edd941bc 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshloadbalancingstrategies.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshloadbalancingstrategies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshloadbalancingstrategies.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshloadbalancingstrategy scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -46,6 +53,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -382,6 +390,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -412,3 +421,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshproxypatches.yaml b/charts/kuma/kuma/crds/kuma.io_meshproxypatches.yaml index c6a223035..42b4cd47c 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshproxypatches.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshproxypatches.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshproxypatches.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshproxypatch scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + 
type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -475,6 +482,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -501,3 +509,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshratelimits.yaml b/charts/kuma/kuma/crds/kuma.io_meshratelimits.yaml index 84c03219e..abfd51f34 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshratelimits.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshratelimits.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshratelimits.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshratelimit scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -165,6 +172,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -199,6 +207,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -224,3 +233,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshretries.yaml b/charts/kuma/kuma/crds/kuma.io_meshretries.yaml index a136c8fa0..d724395a3 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshretries.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshretries.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshretries.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshretry scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -45,6 +52,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -364,6 +372,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -394,3 +403,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshtcproutes.yaml b/charts/kuma/kuma/crds/kuma.io_meshtcproutes.yaml index 11b589189..1bc3081aa 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshtcproutes.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshtcproutes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshtcproutes.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshtcproute scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -45,6 +52,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -88,6 +96,7 @@ spec: enum: 
- Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -132,6 +141,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -163,3 +173,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshtimeouts.yaml b/charts/kuma/kuma/crds/kuma.io_meshtimeouts.yaml index c49cf77da..c55e957a8 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshtimeouts.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshtimeouts.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshtimeouts.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshtimeout scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -96,6 +103,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -130,6 +138,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -210,6 +219,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -240,3 +250,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshtraces.yaml b/charts/kuma/kuma/crds/kuma.io_meshtraces.yaml index c6561b212..0e8b08c9d 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshtraces.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshtraces.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshtraces.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshtrace scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -193,6 +200,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -218,3 +226,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_meshtrafficpermissions.yaml b/charts/kuma/kuma/crds/kuma.io_meshtrafficpermissions.yaml index 83e1920c2..3ab56942e 100644 --- a/charts/kuma/kuma/crds/kuma.io_meshtrafficpermissions.yaml +++ b/charts/kuma/kuma/crds/kuma.io_meshtrafficpermissions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: meshtrafficpermissions.kuma.io spec: group: kuma.io @@ -16,7 +16,14 @@ spec: singular: meshtrafficpermission scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.targetRef.kind + name: TargetRef Kind + type: string + - jsonPath: .spec.targetRef.name + name: TargetRef Name + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -63,6 +70,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - 
MeshServiceSubset - MeshHTTPRoute @@ -97,6 +105,7 @@ spec: enum: - Mesh - MeshSubset + - MeshGateway - MeshService - MeshServiceSubset - MeshHTTPRoute @@ -122,3 +131,4 @@ spec: type: object served: true storage: true + subresources: {} diff --git a/charts/kuma/kuma/crds/kuma.io_proxytemplates.yaml b/charts/kuma/kuma/crds/kuma.io_proxytemplates.yaml index 6b05719d8..111d4450f 100644 --- a/charts/kuma/kuma/crds/kuma.io_proxytemplates.yaml +++ b/charts/kuma/kuma/crds/kuma.io_proxytemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: proxytemplates.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_ratelimits.yaml b/charts/kuma/kuma/crds/kuma.io_ratelimits.yaml index 2bd6dcacd..cc6fa13fa 100644 --- a/charts/kuma/kuma/crds/kuma.io_ratelimits.yaml +++ b/charts/kuma/kuma/crds/kuma.io_ratelimits.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: ratelimits.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_retries.yaml b/charts/kuma/kuma/crds/kuma.io_retries.yaml index 01cb88902..865df1b2f 100644 --- a/charts/kuma/kuma/crds/kuma.io_retries.yaml +++ b/charts/kuma/kuma/crds/kuma.io_retries.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: retries.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_serviceinsights.yaml b/charts/kuma/kuma/crds/kuma.io_serviceinsights.yaml index a85e134ce..135eaedda 100644 --- a/charts/kuma/kuma/crds/kuma.io_serviceinsights.yaml +++ b/charts/kuma/kuma/crds/kuma.io_serviceinsights.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: serviceinsights.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_timeouts.yaml b/charts/kuma/kuma/crds/kuma.io_timeouts.yaml index a04dd9d6f..b2f8b3d60 100644 --- a/charts/kuma/kuma/crds/kuma.io_timeouts.yaml +++ b/charts/kuma/kuma/crds/kuma.io_timeouts.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: timeouts.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_trafficlogs.yaml b/charts/kuma/kuma/crds/kuma.io_trafficlogs.yaml index 2e1b5e864..c74f9a90f 100644 --- a/charts/kuma/kuma/crds/kuma.io_trafficlogs.yaml +++ b/charts/kuma/kuma/crds/kuma.io_trafficlogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: trafficlogs.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_trafficpermissions.yaml b/charts/kuma/kuma/crds/kuma.io_trafficpermissions.yaml index 820cf2b13..b9469c8c9 100644 --- a/charts/kuma/kuma/crds/kuma.io_trafficpermissions.yaml +++ b/charts/kuma/kuma/crds/kuma.io_trafficpermissions.yaml @@ -3,7 +3,7 @@ apiVersion: 
apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: trafficpermissions.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_trafficroutes.yaml b/charts/kuma/kuma/crds/kuma.io_trafficroutes.yaml index b2ce22ebf..1e3158363 100644 --- a/charts/kuma/kuma/crds/kuma.io_trafficroutes.yaml +++ b/charts/kuma/kuma/crds/kuma.io_trafficroutes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: trafficroutes.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_traffictraces.yaml b/charts/kuma/kuma/crds/kuma.io_traffictraces.yaml index 24bedcbe9..f85ababd9 100644 --- a/charts/kuma/kuma/crds/kuma.io_traffictraces.yaml +++ b/charts/kuma/kuma/crds/kuma.io_traffictraces.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: traffictraces.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_virtualoutbounds.yaml b/charts/kuma/kuma/crds/kuma.io_virtualoutbounds.yaml index 19e7be87c..a5fe905e0 100644 --- a/charts/kuma/kuma/crds/kuma.io_virtualoutbounds.yaml +++ b/charts/kuma/kuma/crds/kuma.io_virtualoutbounds.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: virtualoutbounds.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_zoneegresses.yaml b/charts/kuma/kuma/crds/kuma.io_zoneegresses.yaml index 780d25682..b202d0fb8 100644 --- a/charts/kuma/kuma/crds/kuma.io_zoneegresses.yaml +++ b/charts/kuma/kuma/crds/kuma.io_zoneegresses.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: zoneegresses.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_zoneegressinsights.yaml b/charts/kuma/kuma/crds/kuma.io_zoneegressinsights.yaml index e1221ab49..50c7f6864 100644 --- a/charts/kuma/kuma/crds/kuma.io_zoneegressinsights.yaml +++ b/charts/kuma/kuma/crds/kuma.io_zoneegressinsights.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: zoneegressinsights.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_zoneingresses.yaml b/charts/kuma/kuma/crds/kuma.io_zoneingresses.yaml index c91cd56cb..0754071e2 100644 --- a/charts/kuma/kuma/crds/kuma.io_zoneingresses.yaml +++ b/charts/kuma/kuma/crds/kuma.io_zoneingresses.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: zoneingresses.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_zoneingressinsights.yaml b/charts/kuma/kuma/crds/kuma.io_zoneingressinsights.yaml index 72a3a304f..87d2c06ab 100644 --- a/charts/kuma/kuma/crds/kuma.io_zoneingressinsights.yaml +++ 
b/charts/kuma/kuma/crds/kuma.io_zoneingressinsights.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: zoneingressinsights.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_zoneinsights.yaml b/charts/kuma/kuma/crds/kuma.io_zoneinsights.yaml index 92cf14ad6..fa149598a 100644 --- a/charts/kuma/kuma/crds/kuma.io_zoneinsights.yaml +++ b/charts/kuma/kuma/crds/kuma.io_zoneinsights.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: zoneinsights.kuma.io spec: group: kuma.io diff --git a/charts/kuma/kuma/crds/kuma.io_zones.yaml b/charts/kuma/kuma/crds/kuma.io_zones.yaml index 09cb5d9c5..bcd73a05b 100644 --- a/charts/kuma/kuma/crds/kuma.io_zones.yaml +++ b/charts/kuma/kuma/crds/kuma.io_zones.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.13.0 name: zones.kuma.io spec: group: kuma.io diff --git a/charts/redpanda/redpanda/Chart.lock b/charts/redpanda/redpanda/Chart.lock index 908228a74..cba1014c1 100644 --- a/charts/redpanda/redpanda/Chart.lock +++ b/charts/redpanda/redpanda/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: https://charts.redpanda.com version: 0.1.5 digest: sha256:dd7afd55f6eb7e9b3a91b0e5eeda47138e23c255b32d277ad4cb3a7ad3ec1b1f -generated: "2023-08-29T00:48:34.845874471Z" +generated: "2023-08-29T23:24:16.635099387Z" diff --git a/charts/redpanda/redpanda/Chart.yaml b/charts/redpanda/redpanda/Chart.yaml index c1abd30d6..866b5a362 100644 --- a/charts/redpanda/redpanda/Chart.yaml +++ b/charts/redpanda/redpanda/Chart.yaml @@ -37,4 +37,4 @@ name: redpanda sources: - https://github.com/redpanda-data/helm-charts type: application -version: 5.1.7 +version: 5.2.0 diff --git a/charts/redpanda/redpanda/templates/console/configmap-and-deployment.yaml b/charts/redpanda/redpanda/templates/console/configmap-and-deployment.yaml index 265b22be6..e2d9486dc 100644 --- a/charts/redpanda/redpanda/templates/console/configmap-and-deployment.yaml +++ b/charts/redpanda/redpanda/templates/console/configmap-and-deployment.yaml @@ -61,7 +61,7 @@ limitations under the License. "enabled" $values.connectors.enabled "clusters" (list (dict - "url" (printf "http://%s.%s.svc.cluster.local:%s" (include "connectors.serviceName" $connectorsValues) .Release.Namespace ($values.connectors.connectors.restPort | toString )) + "url" (printf "http://%s.%s.svc.%s:%s" (include "connectors.serviceName" $connectorsValues) .Release.Namespace ($values.clusterDomain | trimSuffix ".") ($values.connectors.connectors.restPort | toString )) "name" "connectors" "tls" (dict "enabled" "false" @@ -248,4 +248,4 @@ limitations under the License. 
{{ $helmVars := merge $consoleValues $helmVars }} --- {{ include (print .Subcharts.console.Template.BasePath "/deployment.yaml") $helmVars }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/charts/redpanda/redpanda/templates/service.internal.yaml b/charts/redpanda/redpanda/templates/service.internal.yaml index 0f1cb94e1..1dcd041a8 100644 --- a/charts/redpanda/redpanda/templates/service.internal.yaml +++ b/charts/redpanda/redpanda/templates/service.internal.yaml @@ -28,6 +28,9 @@ metadata: {{- with include "full.labels" . }} {{- . | nindent 4 }} {{- end }} +{{- with dig "service" "internal" "annotations" dict .Values.AsMap }} + annotations: {{ toYaml . | nindent 4 }} +{{- end }} spec: type: ClusterIP publishNotReadyAddresses: true diff --git a/charts/redpanda/redpanda/values.schema.json b/charts/redpanda/redpanda/values.schema.json index a3bb2e3e8..14f43484b 100644 --- a/charts/redpanda/redpanda/values.schema.json +++ b/charts/redpanda/redpanda/values.schema.json @@ -55,6 +55,14 @@ "properties": { "name": { "type": "string" + }, + "internal": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + } + } } } }, diff --git a/charts/redpanda/redpanda/values.yaml b/charts/redpanda/redpanda/values.yaml index 19cd9ebdf..93e78328b 100644 --- a/charts/redpanda/redpanda/values.yaml +++ b/charts/redpanda/redpanda/values.yaml @@ -60,6 +60,15 @@ image: # service: # -- set service.name to override the default service name # name: redpanda +# -- internal Service +# internal: +# -- add annotations to the internal Service +# annotations: {} +# +# -- eg. for a bare metal install using external-dns +# annotations: +# "external-dns.alpha.kubernetes.io/hostname": redpanda.domain.dom +# "external-dns.alpha.kubernetes.io/endpoints-type": HostIP # -- Pull secrets may be used to provide credentials to image repositories # See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ diff --git a/charts/sysdig/sysdig/CHANGELOG.md b/charts/sysdig/sysdig/CHANGELOG.md index 0246ca1a5..8905f1e90 100644 --- a/charts/sysdig/sysdig/CHANGELOG.md +++ b/charts/sysdig/sysdig/CHANGELOG.md @@ -10,6 +10,12 @@ Manual edits are supported only below '## Change Log' and should be used exclusively to fix incorrect entries and not to add new ones. 
## Change Log +# v1.16.9 +### Chores +* **sysdig, node-analyzer** [0ba5baa3](https://github.com/sysdiglabs/charts/commit/0ba5baa3e690be1c2c3a92c4928558efffb33539): bump sysdig/vuln-runtime-scanner to v1.5.5 ([#1320](https://github.com/sysdiglabs/charts/issues/1320)) + + * * Runtime Scanner bumped to 1.5.5 + * Fixed a bug that could cause the Runtime Scanner to perform unneeded rescans # v1.16.8 ### Chores * **sysdig, node-analyzer** [17d2e503](https://github.com/sysdiglabs/charts/commit/17d2e50326f587b154f43beb706627416ca6a4b6): bump sysdig/vuln-runtime-scanner to v1.5.4 ([#1305](https://github.com/sysdiglabs/charts/issues/1305)) diff --git a/charts/sysdig/sysdig/Chart.yaml b/charts/sysdig/sysdig/Chart.yaml index 06ce7e4f2..bf26c79b6 100644 --- a/charts/sysdig/sysdig/Chart.yaml +++ b/charts/sysdig/sysdig/Chart.yaml @@ -19,4 +19,4 @@ name: sysdig sources: - https://app.sysdigcloud.com/#/settings/user - https://github.com/draios/sysdig -version: 1.16.8 +version: 1.16.9 diff --git a/charts/sysdig/sysdig/README.md b/charts/sysdig/sysdig/README.md index cf6f1d063..e95834dc0 100644 --- a/charts/sysdig/sysdig/README.md +++ b/charts/sysdig/sysdig/README.md @@ -262,7 +262,7 @@ The following table lists the configurable parameters of the Sysdig chart and th | `nodeAnalyzer.runtimeScanner.deploy` | Deploys the Runtime Scanner. | `false` | | `nodeAnalyzer.runtimeScanner.extraMounts` | Specifies a container engine custom socket path (docker, containerd, CRI-O). | | | `nodeAnalyzer.runtimeScanner.image.repository` | The image repository to pull the Runtime Scanner from. | `sysdig/vuln-runtime-scanner` | -| `nodeAnalyzer.runtimeScanner.image.tag` | The image tag to pull the Runtime Scanner. | `1.5.4` | +| `nodeAnalyzer.runtimeScanner.image.tag` | The image tag to pull the Runtime Scanner. | `1.5.5` | | `nodeAnalyzer.runtimeScanner.image.digest` | The image digest to pull. | ` ` | | `nodeAnalyzer.runtimeScanner.image.pullPolicy` | The image pull policy for the Runtime Scanner. | `IfNotPresent` | | `nodeAnalyzer.runtimeScanner.resources.requests.cpu` | Runtime Scanner CPU requests per node. | `250m` | diff --git a/charts/sysdig/sysdig/RELEASE-NOTES.md b/charts/sysdig/sysdig/RELEASE-NOTES.md index ad77fdb69..53ffe00bb 100644 --- a/charts/sysdig/sysdig/RELEASE-NOTES.md +++ b/charts/sysdig/sysdig/RELEASE-NOTES.md @@ -1,8 +1,8 @@ # What's Changed ### Chores -- **sysdig, node-analyzer** [17d2e503](https://github.com/sysdiglabs/charts/commit/17d2e50326f587b154f43beb706627416ca6a4b6): bump sysdig/vuln-runtime-scanner to v1.5.4 ([#1305](https://github.com/sysdiglabs/charts/issues/1305)) +- **sysdig, node-analyzer** [0ba5baa3](https://github.com/sysdiglabs/charts/commit/0ba5baa3e690be1c2c3a92c4928558efffb33539): bump sysdig/vuln-runtime-scanner to v1.5.5 ([#1320](https://github.com/sysdiglabs/charts/issues/1320)) - * * Runtime Scanner bumped to 1.5.4 - * Fixed a misbehavior of the image layer analyzer, which could lead to non-existing software artifacts being reported in the SBOM as a result of incorrect handling of opaque directories (ESC-3511). 
-#### Full diff: https://github.com/sysdiglabs/charts/compare/sysdig-deploy-1.18.0...sysdig-1.16.8 + * * Runtime Scanner bumped to 1.5.5 + * Fixed a bug that could cause the Runtime Scanner to perform unneeded rescans +#### Full diff: https://github.com/sysdiglabs/charts/compare/sysdig-deploy-1.19.4...sysdig-1.16.9 diff --git a/charts/sysdig/sysdig/values.yaml b/charts/sysdig/sysdig/values.yaml index 0a15a4b74..14ffb54cf 100644 --- a/charts/sysdig/sysdig/values.yaml +++ b/charts/sysdig/sysdig/values.yaml @@ -505,7 +505,7 @@ nodeAnalyzer: deploy: false image: repository: sysdig/vuln-runtime-scanner - tag: 1.5.4 + tag: 1.5.5 digest: pullPolicy: IfNotPresent diff --git a/charts/yugabyte/yugabyte/.helmignore b/charts/yugabyte/yugabyte/.helmignore deleted file mode 100644 index 3598c3003..000000000 --- a/charts/yugabyte/yugabyte/.helmignore +++ /dev/null @@ -1 +0,0 @@ -tests \ No newline at end of file diff --git a/charts/yugabyte/yugabyte/Chart.yaml b/charts/yugabyte/yugabyte/Chart.yaml index 2f619973e..89bba8d44 100644 --- a/charts/yugabyte/yugabyte/Chart.yaml +++ b/charts/yugabyte/yugabyte/Chart.yaml @@ -3,20 +3,18 @@ annotations: catalog.cattle.io/display-name: YugabyteDB catalog.cattle.io/kube-version: '>=1.18-0' catalog.cattle.io/release-name: yugabyte - charts.openshift.io/name: yugabyte -apiVersion: v2 -appVersion: 2.18.2.1-b1 +apiVersion: v1 +appVersion: 2.14.12.0-b19 description: YugabyteDB is the high-performance distributed SQL database for building global, internet-scale apps. home: https://www.yugabyte.com icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4 -kubeVersion: '>=1.18-0' maintainers: -- email: sanketh@yugabyte.com - name: Sanketh Indarapu -- email: gjalla@yugabyte.com - name: Govardhan Reddy Jalla +- email: ram@yugabyte.com + name: Ram Sri +- email: arnav@yugabyte.com + name: Arnav Agarwal name: yugabyte sources: - https://github.com/yugabyte/yugabyte-db -version: 2.18.2+1 +version: 2.14.12 diff --git a/charts/yugabyte/yugabyte/app-readme.md b/charts/yugabyte/yugabyte/app-readme.md index 61c618012..48c8c43d2 100644 --- a/charts/yugabyte/yugabyte/app-readme.md +++ b/charts/yugabyte/yugabyte/app-readme.md @@ -1 +1 @@ -This chart bootstraps an RF3 YugabyteDB version 2.18.2.1-b1 cluster using the Helm Package Manager. +This chart bootstraps an RF3 Yugabyte DB version 2.14.12.0-b19 cluster using the Helm Package Manager. 
diff --git a/charts/yugabyte/yugabyte/generate_kubeconfig.py b/charts/yugabyte/yugabyte/generate_kubeconfig.py index f4c2d14ab..b974c0f2d 100644 --- a/charts/yugabyte/yugabyte/generate_kubeconfig.py +++ b/charts/yugabyte/yugabyte/generate_kubeconfig.py @@ -11,209 +11,84 @@ from sys import exit import json import base64 import tempfile -import time -import os.path -def run_command(command_args, namespace=None, as_json=True, log_command=True): - command = ["kubectl"] +def run_command(command_args, namespace=None, as_json=True): + command = ['kubectl'] if namespace: - command.extend(["--namespace", namespace]) + command.extend(['--namespace', namespace]) command.extend(command_args) if as_json: - command.extend(["-o", "json"]) - if log_command: - print("Running command: {}".format(" ".join(command))) - output = check_output(command) - if as_json: - return json.loads(output) + command.extend(['-o', 'json']) + return json.loads(check_output(command)) else: - return output.decode("utf8") + return check_output(command).decode('utf8') -def create_sa_token_secret(directory, sa_name, namespace): - """Creates a service account token secret for sa_name in - namespace. Returns the name of the secret created. - - Ref: - https://k8s.io/docs/concepts/configuration/secret/#service-account-token-secrets - - """ - token_secret = { - "apiVersion": "v1", - "data": { - "do-not-delete-used-for-yugabyte-anywhere": "MQ==", - }, - "kind": "Secret", - "metadata": { - "annotations": { - "kubernetes.io/service-account.name": sa_name, - }, - "name": sa_name, - }, - "type": "kubernetes.io/service-account-token", - } - token_secret_file_name = os.path.join(directory, "token_secret.yaml") - with open(token_secret_file_name, "w") as token_secret_file: - json.dump(token_secret, token_secret_file) - run_command(["apply", "-f", token_secret_file_name], namespace) - return sa_name - - -def get_secret_data(secret, namespace): - """Returns the secret in JSON format if it has ca.crt and token in - it, else returns None. It retries 3 times with 1 second timeout - for the secret to be populated with this data. - - """ - secret_data = None - num_retries = 5 - timeout = 2 - while True: - secret_json = run_command(["get", "secret", secret], namespace) - if "ca.crt" in secret_json["data"] and "token" in secret_json["data"]: - secret_data = secret_json - break - - num_retries -= 1 - if num_retries == 0: - break - print( - "Secret '{}' is not populated. Sleep {}s, ({} retries left)".format( - secret, timeout, num_retries - ) - ) - time.sleep(timeout) - return secret_data - - -def get_secrets_for_sa(sa_name, namespace): - """Returns a list of all service account token secrets associated - with the given sa_name in the namespace. 
- - """ - secrets = run_command( - [ - "get", - "secret", - "--field-selector", - "type=kubernetes.io/service-account-token", - "-o", - 'jsonpath="{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name == "' - + sa_name - + '")].metadata.name}"', - ], - as_json=False, - ) - return secrets.strip('"').split() - - -parser = argparse.ArgumentParser(description="Generate KubeConfig with Token") -parser.add_argument("-s", "--service_account", help="Service Account name", required=True) -parser.add_argument("-n", "--namespace", help="Kubernetes namespace", default="kube-system") -parser.add_argument("-c", "--context", help="kubectl context") -parser.add_argument("-o", "--output_file", help="output file path") +parser = argparse.ArgumentParser(description='Generate KubeConfig with Token') +parser.add_argument('-s', '--service_account', help='Service Account name', required=True) +parser.add_argument('-n', '--namespace', help='Kubernetes namespace', default='kube-system') +parser.add_argument('-c', '--context', help='kubectl context') args = vars(parser.parse_args()) # if the context is not provided we use the current-context -context = args["context"] +context = args['context'] if context is None: - context = run_command(["config", "current-context"], args["namespace"], as_json=False) + context = run_command(['config', 'current-context'], + args['namespace'], as_json=False) -cluster_attrs = run_command( - ["config", "get-contexts", context.strip(), "--no-headers"], args["namespace"], as_json=False -) +cluster_attrs = run_command(['config', 'get-contexts', context.strip(), + '--no-headers'], args['namespace'], as_json=False) cluster_name = cluster_attrs.strip().split()[2] -endpoint = run_command( - [ - "config", - "view", - "-o", - 'jsonpath="{.clusters[?(@.name =="' + cluster_name + '")].cluster.server}"', - ], - args["namespace"], - as_json=False, -) -service_account_info = run_command(["get", "sa", args["service_account"]], args["namespace"]) - -tmpdir = tempfile.TemporaryDirectory() - -# Get the token and ca.crt from service account secret. -sa_secrets = list() - -# Get secrets specified in the service account, there can be multiple -# of them, and not all are service account token secrets. -if "secrets" in service_account_info: - sa_secrets = [secret["name"] for secret in service_account_info["secrets"]] - -# Find the existing additional service account token secrets -sa_secrets.extend(get_secrets_for_sa(args["service_account"], args["namespace"])) +endpoint = run_command(['config', 'view', '-o', + 'jsonpath="{.clusters[?(@.name =="' + + cluster_name + '")].cluster.server}"'], + args['namespace'], as_json=False) +service_account_info = run_command(['get', 'sa', args['service_account']], + args['namespace']) +# some ServiceAccounts have multiple secrets, and not all them have a +# ca.crt and a token. +sa_secrets = [secret['name'] for secret in service_account_info['secrets']] secret_data = None for secret in sa_secrets: - secret_data = get_secret_data(secret, args["namespace"]) - if secret_data is not None: - break - -# Kubernetes 1.22+ doesn't create the service account token secret by -# default, we have to create one. 
+ secret_json = run_command(['get', 'secret', secret], args['namespace']) + if 'ca.crt' not in secret_json['data'] and 'token' not in secret_json['data']: + continue + secret_data = secret_json if secret_data is None: - print("No usable secret found for '{}', creating one.".format(args["service_account"])) - token_secret = create_sa_token_secret(tmpdir.name, args["service_account"], args["namespace"]) - secret_data = get_secret_data(token_secret, args["namespace"]) - if secret_data is None: - exit( - "Failed to generate kubeconfig: No usable credentials found for '{}'.".format( - args["service_account"] - ) - ) + exit("No usable secret found for '{}'.".format(args['service_account'])) +context_name = '{}-{}'.format(args['service_account'], cluster_name) +kube_config = '/tmp/{}.conf'.format(args['service_account']) -context_name = "{}-{}".format(args["service_account"], cluster_name) -kube_config = args["output_file"] -if not kube_config: - kube_config = "/tmp/{}.conf".format(args["service_account"]) +with tempfile.NamedTemporaryFile() as ca_crt_file: + ca_crt = base64.b64decode(secret_data['data']['ca.crt']) + ca_crt_file.write(ca_crt) + ca_crt_file.flush() + # create kubeconfig entry + set_cluster_cmd = ['config', 'set-cluster', cluster_name, + '--kubeconfig={}'.format(kube_config), + '--server={}'.format(endpoint.strip('"')), + '--embed-certs=true', + '--certificate-authority={}'.format(ca_crt_file.name)] + run_command(set_cluster_cmd, as_json=False) +user_token = base64.b64decode(secret_data['data']['token']).decode('utf-8') +set_credentials_cmd = ['config', 'set-credentials', context_name, + '--token={}'.format(user_token), + '--kubeconfig={}'.format(kube_config)] +run_command(set_credentials_cmd, as_json=False) -ca_crt_file_name = os.path.join(tmpdir.name, "ca.crt") -ca_crt_file = open(ca_crt_file_name, "wb") -ca_crt_file.write(base64.b64decode(secret_data["data"]["ca.crt"])) -ca_crt_file.close() - -# create kubeconfig entry -set_cluster_cmd = [ - "config", - "set-cluster", - cluster_name, - "--kubeconfig={}".format(kube_config), - "--server={}".format(endpoint.strip('"')), - "--embed-certs=true", - "--certificate-authority={}".format(ca_crt_file_name), -] -run_command(set_cluster_cmd, as_json=False) - -user_token = base64.b64decode(secret_data["data"]["token"]).decode("utf-8") -set_credentials_cmd = [ - "config", - "set-credentials", - context_name, - "--token={}".format(user_token), - "--kubeconfig={}".format(kube_config), -] -run_command(set_credentials_cmd, as_json=False, log_command=False) - -set_context_cmd = [ - "config", - "set-context", - context_name, - "--cluster={}".format(cluster_name), - "--user={}".format(context_name), - "--kubeconfig={}".format(kube_config), -] +set_context_cmd = ['config', 'set-context', context_name, + '--cluster={}'.format(cluster_name), + '--user={}'.format(context_name), + '--kubeconfig={}'.format(kube_config)] run_command(set_context_cmd, as_json=False) -use_context_cmd = ["config", "use-context", context_name, "--kubeconfig={}".format(kube_config)] +use_context_cmd = ['config', 'use-context', context_name, + '--kubeconfig={}'.format(kube_config)] run_command(use_context_cmd, as_json=False) print("Generated the kubeconfig file: {}".format(kube_config)) diff --git a/charts/yugabyte/yugabyte/openshift.values.yaml b/charts/yugabyte/yugabyte/openshift.values.yaml deleted file mode 100644 index d2784b23e..000000000 --- a/charts/yugabyte/yugabyte/openshift.values.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# OCP compatible values for yugabyte - -Image: - 
repository: "quay.io/yugabyte/yugabyte-ubi" diff --git a/charts/yugabyte/yugabyte/questions.yaml b/charts/yugabyte/yugabyte/questions.yaml index 6befa49e1..c88fd43c0 100644 --- a/charts/yugabyte/yugabyte/questions.yaml +++ b/charts/yugabyte/yugabyte/questions.yaml @@ -16,7 +16,7 @@ questions: label: YugabyteDB image repository description: "YugabyteDB image repository" - variable: Image.tag - default: "2.5.1.0-b153" + default: "2.14.1.0-b36" required: true type: string label: YugabyteDB image tag diff --git a/charts/yugabyte/yugabyte/templates/_helpers.tpl b/charts/yugabyte/yugabyte/templates/_helpers.tpl index 7206a4270..460d711fe 100644 --- a/charts/yugabyte/yugabyte/templates/_helpers.tpl +++ b/charts/yugabyte/yugabyte/templates/_helpers.tpl @@ -26,7 +26,7 @@ Generate common labels. {{- define "yugabyte.labels" }} heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }} release: {{ .Release.Name | quote }} -chart: {{ .Chart.Name | quote }} +chart: {{ .Values.oldNamingStyle | ternary .Chart.Name (include "yugabyte.chart" .) | quote }} component: {{ .Values.Component | quote }} {{- if .Values.commonLabels}} {{ toYaml .Values.commonLabels }} @@ -56,89 +56,6 @@ release: {{ .root.Release.Name | quote }} {{- end }} {{- end }} -{{/* -Create secrets in DBNamespace from other namespaces by iterating over envSecrets. -*/}} -{{- define "yugabyte.envsecrets" -}} -{{- range $v := .secretenv }} -{{- if $v.valueFrom.secretKeyRef.namespace }} -{{- $secretObj := (lookup -"v1" -"Secret" -$v.valueFrom.secretKeyRef.namespace -$v.valueFrom.secretKeyRef.name) -| default dict }} -{{- $secretData := (get $secretObj "data") | default dict }} -{{- $secretValue := (get $secretData $v.valueFrom.secretKeyRef.key) | default "" }} -{{- if (and (not $secretValue) (not $v.valueFrom.secretKeyRef.optional)) }} -{{- required (printf "Secret or key missing for %s/%s in namespace: %s" -$v.valueFrom.secretKeyRef.name -$v.valueFrom.secretKeyRef.key -$v.valueFrom.secretKeyRef.namespace) -nil }} -{{- end }} -{{- if $secretValue }} -apiVersion: v1 -kind: Secret -metadata: - {{- $secretfullname := printf "%s-%s-%s-%s" - $.root.Release.Name - $v.valueFrom.secretKeyRef.namespace - $v.valueFrom.secretKeyRef.name - $v.valueFrom.secretKeyRef.key - }} - name: {{ printf "%s-%s-%s-%s-%s-%s" - $.root.Release.Name - ($v.valueFrom.secretKeyRef.namespace | substr 0 5) - ($v.valueFrom.secretKeyRef.name | substr 0 5) - ( $v.valueFrom.secretKeyRef.key | substr 0 5) - (sha256sum $secretfullname | substr 0 4) - ($.suffix) - | lower | replace "." "" | replace "_" "" - }} - namespace: "{{ $.root.Release.Namespace }}" - labels: - {{- include "yugabyte.labels" $.root | indent 4 }} -type: Opaque # should it be an Opaque secret? -data: - {{ $v.valueFrom.secretKeyRef.key }}: {{ $secretValue | quote }} -{{- end }} -{{- end }} ---- -{{- end }} -{{- end }} - -{{/* -Add env secrets to DB statefulset. -*/}} -{{- define "yugabyte.addenvsecrets" -}} -{{- range $v := .secretenv }} -- name: {{ $v.name }} - valueFrom: - secretKeyRef: - {{- if $v.valueFrom.secretKeyRef.namespace }} - {{- $secretfullname := printf "%s-%s-%s-%s" - $.root.Release.Name - $v.valueFrom.secretKeyRef.namespace - $v.valueFrom.secretKeyRef.name - $v.valueFrom.secretKeyRef.key - }} - name: {{ printf "%s-%s-%s-%s-%s-%s" - $.root.Release.Name - ($v.valueFrom.secretKeyRef.namespace | substr 0 5) - ($v.valueFrom.secretKeyRef.name | substr 0 5) - ($v.valueFrom.secretKeyRef.key | substr 0 5) - (sha256sum $secretfullname | substr 0 4) - ($.suffix) - | lower | replace "." 
"" | replace "_" "" - }} - {{- else }} - name: {{ $v.valueFrom.secretKeyRef.name }} - {{- end }} - key: {{ $v.valueFrom.secretKeyRef.key }} - optional: {{ $v.valueFrom.secretKeyRef.optional | default "false" }} -{{- end }} -{{- end }} {{/* Create Volume name. */}} @@ -167,21 +84,18 @@ Generate a preflight check script invocation. */}} {{- define "yugabyte.preflight_check" -}} {{- if not .Values.preflight.skipAll -}} -{{- $port := .Preflight.Port -}} -{{- range $addr := split "," .Preflight.Addr -}} if [ -f /home/yugabyte/tools/k8s_preflight.py ]; then PYTHONUNBUFFERED="true" /home/yugabyte/tools/k8s_preflight.py \ dnscheck \ - --addr="{{ $addr }}" \ -{{- if not $.Values.preflight.skipBind }} - --port="{{ $port }}" + --addr="{{ .Preflight.Addr }}" \ +{{- if not .Values.preflight.skipBind }} + --port="{{ .Preflight.Port }}" {{- else }} --skip_bind {{- end }} fi && \ -{{ end }} -{{- end }} -{{- end }} +{{- end -}} +{{- end -}} {{/* Get YugaByte fs data directories. @@ -206,20 +120,12 @@ Get files from fs data directories for readiness / liveness probes. Generate server FQDN. */}} {{- define "yugabyte.server_fqdn" -}} - {{- if .Values.multicluster.createServicePerPod -}} + {{- if (and .Values.istioCompatibility.enabled .Values.multicluster.createServicePerPod) -}} {{- printf "$(HOSTNAME).$(NAMESPACE).svc.%s" .Values.domainName -}} - {{- else if (and .Values.oldNamingStyle .Values.multicluster.createServiceExports) -}} - {{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }} - {{- printf "$(HOSTNAME).%s.%s.$(NAMESPACE).svc.clusterset.local" $membershipName .Service.name -}} {{- else if .Values.oldNamingStyle -}} {{- printf "$(HOSTNAME).%s.$(NAMESPACE).svc.%s" .Service.name .Values.domainName -}} {{- else -}} - {{- if .Values.multicluster.createServiceExports -}} - {{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }} - {{- printf "$(HOSTNAME).%s.%s-%s.$(NAMESPACE).svc.clusterset.local" $membershipName (include "yugabyte.fullname" .) .Service.name -}} - {{- else -}} - {{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}} - {{- end -}} + {{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}} {{- end -}} {{- end -}} @@ -232,25 +138,10 @@ Generate server broadcast address. {{/* Generate server RPC bind address. - -In case of multi-cluster services (MCS), we set it to $(POD_IP) to -ensure YCQL uses a resolvable address. -See https://github.com/yugabyte/yugabyte-db/issues/16155 - -We use a workaround for above in case of Istio by setting it to -$(POD_IP) and localhost. Master doesn't support that combination, so -we stick to 0.0.0.0, which works for master. */}} {{- define "yugabyte.rpc_bind_address" -}} - {{- $port := index .Service.ports "tcp-rpc-port" -}} {{- if .Values.istioCompatibility.enabled -}} - {{- if (eq .Service.name "yb-masters") -}} - 0.0.0.0:{{ $port }} - {{- else -}} - $(POD_IP):{{ $port }},127.0.0.1:{{ $port }} - {{- end -}} - {{- else if (or .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod) -}} - $(POD_IP):{{ $port }} + 0.0.0.0:{{ index .Service.ports "tcp-rpc-port" -}} {{- else -}} {{- include "yugabyte.server_fqdn" . -}} {{- end -}} @@ -267,7 +158,7 @@ Generate server web interface. 
Generate server CQL proxy bind address. */}} {{- define "yugabyte.cql_proxy_bind_address" -}} - {{- if or .Values.istioCompatibility.enabled .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod -}} + {{- if .Values.istioCompatibility.enabled -}} 0.0.0.0:{{ index .Service.ports "tcp-yql-port" -}} {{- else -}} {{- include "yugabyte.server_fqdn" . -}} @@ -312,10 +203,10 @@ Compute the maximum number of unavailable pods based on the number of master rep Set consistent issuer name. */}} {{- define "yugabyte.tls_cm_issuer" -}} - {{- if .Values.tls.certManager.bootstrapSelfsigned -}} - {{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }} + {{- if .Values.tls.certManager.useClusterIssuer -}} + {{ .Values.tls.certManager.clusterIssuer }} {{- else -}} - {{ .Values.tls.certManager.useClusterIssuer | ternary .Values.tls.certManager.clusterIssuer .Values.tls.certManager.issuer}} + {{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }} {{- end -}} {{- end -}} @@ -355,51 +246,3 @@ Set consistent issuer name. {{- end -}} {{- end -}} {{- end -}} - -{{/* - Default nodeAffinity for multi-az deployments -*/}} -{{- define "yugabyte.multiAZNodeAffinity" -}} -requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: failure-domain.beta.kubernetes.io/zone - operator: In - values: - - {{ .Values.AZ }} - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - {{ .Values.AZ }} -{{- end -}} - -{{/* - Default podAntiAffinity for master and tserver - - This requires "appLabelArgs" to be passed in - defined in service.yaml - we have a .root and a .label in appLabelArgs -*/}} -{{- define "yugabyte.podAntiAffinity" -}} -preferredDuringSchedulingIgnoredDuringExecution: -- weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - {{- if .root.Values.oldNamingStyle }} - - key: app - operator: In - values: - - "{{ .label }}" - {{- else }} - - key: app.kubernetes.io/name - operator: In - values: - - "{{ .label }}" - - key: release - operator: In - values: - - {{ .root.Release.Name | quote }} - {{- end }} - topologyKey: kubernetes.io/hostname -{{- end -}} diff --git a/charts/yugabyte/yugabyte/templates/certificates.yaml b/charts/yugabyte/yugabyte/templates/certificates.yaml index 07fc2e5f5..f8dd4acb5 100644 --- a/charts/yugabyte/yugabyte/templates/certificates.yaml +++ b/charts/yugabyte/yugabyte/templates/certificates.yaml @@ -1,7 +1,7 @@ {{- $root := . -}} --- {{- if $root.Values.tls.certManager.enabled }} -{{- if $root.Values.tls.certManager.bootstrapSelfsigned }} +{{- if not $root.Values.tls.certManager.useClusterIssuer }} --- apiVersion: cert-manager.io/v1 kind: Issuer @@ -37,38 +37,13 @@ spec: ca: secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }} --- -{{- else }} -{{/* when bootstrapSelfsigned = false, ie. when using an external CA. -Create a Secret with just the rootCA.cert value and mount into master/tserver pods. -This will be used as a fall back in case the Secret generated by cert-manager does not -have a root ca.crt. This can happen for certain certificate issuers like LetsEncrypt. 
-*/}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }} - namespace: "{{ $root.Release.Namespace }}" - labels: - {{- include "yugabyte.labels" $root | indent 4 }} -type: Opaque -data: - ca.crt: {{ $root.Values.tls.rootCA.cert }} ---- {{- end }} -{{/* -The below Certificate resource will trigger cert-manager to issue crt/key into Secrets. -These secrets are mounted into master/tserver pods. -*/}} {{- range .Values.Services }} {{- $service := . -}} {{- $appLabelArgs := dict "label" .label "root" $root -}} {{- $serviceValues := (dict "Service" $service "Values" $root.Values "Chart" $root.Chart "Release" $root.Release) -}} -{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}} - -{{- if (gt (int $replicas) 0) }} --- apiVersion: cert-manager.io/v1 kind: Certificate @@ -90,29 +65,28 @@ spec: secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }} duration: {{ $root.Values.tls.certManager.certificates.duration | quote }} renewBefore: {{ $root.Values.tls.certManager.certificates.renewBefore | quote }} + commonName: yugabyte-{{ .name }} isCA: false privateKey: algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }} encoding: PKCS8 size: {{ $root.Values.tls.certManager.certificates.keySize }} - rotationPolicy: Always usages: - server auth - client auth # At least one of a DNS Name, URI, or IP address is required. dnsNames: + {{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}} {{- range $index := until ( int ( $replicas ) ) }} {{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }} {{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }} {{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }} - {{$node}} {{- end }} - - {{ printf "%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }} uris: [] ipAddresses: [] --- {{- end }} -{{- end }} --- apiVersion: cert-manager.io/v1 @@ -140,7 +114,6 @@ spec: algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }} encoding: PKCS8 size: {{ $root.Values.tls.certManager.certificates.keySize }} - rotationPolicy: Always usages: - client auth dnsNames: [] diff --git a/charts/yugabyte/yugabyte/templates/debug_config_map.yaml b/charts/yugabyte/yugabyte/templates/debug_config_map.yaml deleted file mode 100644 index a15c4fc9a..000000000 --- a/charts/yugabyte/yugabyte/templates/debug_config_map.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "yugabyte.fullname" . }}-master-hooks - namespace: "{{ .Release.Namespace }}" -data: -{{- range $index := until ( int ( .Values.replicas.master ) ) }} - yb-master-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' " - yb-master-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' " -{{- end }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "yugabyte.fullname" . 
}}-tserver-hooks - namespace: "{{ .Release.Namespace }}" -data: -{{- range $index := until ( int ( .Values.replicas.tserver) ) }} - yb-tserver-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' " - yb-tserver-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' " -{{- end }} ---- diff --git a/charts/yugabyte/yugabyte/templates/multicluster/common-tserver-service.yaml b/charts/yugabyte/yugabyte/templates/multicluster-common-tserver-service.yaml similarity index 100% rename from charts/yugabyte/yugabyte/templates/multicluster/common-tserver-service.yaml rename to charts/yugabyte/yugabyte/templates/multicluster-common-tserver-service.yaml diff --git a/charts/yugabyte/yugabyte/templates/multicluster/service-per-pod.yaml b/charts/yugabyte/yugabyte/templates/multicluster-multiple-services.yaml similarity index 82% rename from charts/yugabyte/yugabyte/templates/multicluster/service-per-pod.yaml rename to charts/yugabyte/yugabyte/templates/multicluster-multiple-services.yaml index 15e09dce8..a26b39018 100644 --- a/charts/yugabyte/yugabyte/templates/multicluster/service-per-pod.yaml +++ b/charts/yugabyte/yugabyte/templates/multicluster-multiple-services.yaml @@ -11,19 +11,11 @@ metadata: labels: {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} {{- include "yugabyte.labels" $ | indent 4 }} - service-type: "non-endpoint" spec: ports: {{- range $label, $port := $server.ports }} - {{- if (eq $label "grpc-ybc-port") }} - {{- if $.Values.ybc.enabled }} - name: {{ $label | quote }} port: {{ $port }} - {{- end }} - {{- else }} - - name: {{ $label | quote }} - port: {{ $port }} - {{- end }} {{- end}} selector: statefulset.kubernetes.io/pod-name: {{ $podName | quote }} diff --git a/charts/yugabyte/yugabyte/templates/multicluster/mcs-service-export.yaml b/charts/yugabyte/yugabyte/templates/multicluster/mcs-service-export.yaml deleted file mode 100644 index eeafcb1bb..000000000 --- a/charts/yugabyte/yugabyte/templates/multicluster/mcs-service-export.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- /* - Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export - https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api#exporting-services -*/}} -{{- if .Values.multicluster.createServiceExports }} -apiVersion: {{ .Values.multicluster.mcsApiVersion }} -kind: ServiceExport -metadata: - name: {{ .Values.oldNamingStyle | ternary "yb-masters" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-masters") | quote }} - namespace: "{{ .Release.Namespace }}" - labels: - {{- include "yugabyte.labels" . | indent 4 }} ---- -apiVersion: {{ .Values.multicluster.mcsApiVersion }} -kind: ServiceExport -metadata: - name: {{ .Values.oldNamingStyle | ternary "yb-tservers" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-tservers") | quote }} - namespace: "{{ .Release.Namespace }}" - labels: - {{- include "yugabyte.labels" . | indent 4 }} -{{ end -}} diff --git a/charts/yugabyte/yugabyte/templates/secrets.yaml b/charts/yugabyte/yugabyte/templates/secrets.yaml deleted file mode 100644 index 0bd903457..000000000 --- a/charts/yugabyte/yugabyte/templates/secrets.yaml +++ /dev/null @@ -1,7 +0,0 @@ -{{- $root := . -}} ---- # Create secrets from other namespaces for masters. -{{- $data := dict "secretenv" $.Values.master.secretEnv "root" . "suffix" "master"}} -{{- include "yugabyte.envsecrets" $data }} ---- # Create secrets from other namespaces for tservers. -{{- $data := dict "secretenv" $.Values.tserver.secretEnv "root" . 
"suffix" "tserver" }} -{{- include "yugabyte.envsecrets" $data }} \ No newline at end of file diff --git a/charts/yugabyte/yugabyte/templates/service.yaml b/charts/yugabyte/yugabyte/templates/service.yaml index 6a7ea4683..697e4776d 100644 --- a/charts/yugabyte/yugabyte/templates/service.yaml +++ b/charts/yugabyte/yugabyte/templates/service.yaml @@ -24,7 +24,7 @@ data: {{- end }} --- {{- end }} ---- + {{- range .Values.Services }} {{- $service := . -}} {{- $appLabelArgs := dict "label" .label "root" $root -}} @@ -46,29 +46,12 @@ data: {{- range $index := until ( int ( $replicas ) ) }} {{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }} {{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }} - -{{- if $root.Values.multicluster.createServiceExports -}} - {{- $nodeOldStyle = printf "%s-%d.%s.%s.%s.svc.clusterset.local" $service.label $index $root.Values.multicluster.kubernetesClusterId $service.name $root.Release.Namespace }} - {{- $nodeNewStyle = printf "%s-%s-%d.%s.%s-%s.%s.svc.clusterset.local" (include "yugabyte.fullname" $root) $service.label $index $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }} -{{- end -}} - -{{- if $root.Values.multicluster.createServicePerPod -}} - {{- $nodeOldStyle = printf "%s-%d.%s.svc.%s" $service.label $index $root.Release.Namespace $root.Values.domainName }} - {{- $nodeNewStyle = printf "%s-%s-%d.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index $root.Release.Namespace $root.Values.domainName }} -{{- end -}} - {{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }} {{- if $root.Values.tls.rootCA.key }} -{{- $dns1 := printf "*.%s-%s.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }} +{{- $dns1 := printf "*.*.%s" $root.Release.Namespace }} {{- $dns2 := printf "%s.svc.%s" $dns1 $root.Values.domainName }} -{{- if $root.Values.multicluster.createServiceExports -}} - {{- $dns1 = printf "*.%s.%s-%s.%s.svc.clusterset.local" $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }} -{{- end -}} -{{- if $root.Values.multicluster.createServicePerPod -}} - {{- $dns1 = printf "*.%s.svc.%s" $root.Release.Namespace $root.Values.domainName }} -{{- end -}} {{- $rootCA := buildCustomCert $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key -}} -{{- $server := genSignedCert $node ( default nil ) (list $node $dns1 $dns2 ) 3650 $rootCA }} +{{- $server := genSignedCert $node ( default nil ) (list $dns1 $dns2 ) 3650 $rootCA }} node.{{$node}}.crt: {{ $server.Cert | b64enc }} node.{{$node}}.key: {{ $server.Key | b64enc }} {{- else }} @@ -92,20 +75,13 @@ spec: clusterIP: None ports: {{- range $label, $port := .ports }} - {{- if (eq $label "grpc-ybc-port") }} - {{- if $root.Values.ybc.enabled }} - name: {{ $label | quote }} port: {{ $port }} - {{- end }} - {{- else }} - - name: {{ $label | quote }} - port: {{ $port }} - {{- end }} {{- end}} selector: {{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }} -{{- if $root.Values.enableLoadBalancer }} +{{ if $root.Values.enableLoadBalancer }} {{- range $endpoint := $root.Values.serviceEndpoints }} {{- if eq $service.label $endpoint.app }} --- @@ -118,12 +94,11 @@ metadata: labels: 
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }} {{- include "yugabyte.labels" $root | indent 4 }} - service-type: "endpoint" spec: - {{- if eq $root.Release.Service "Tiller" }} + {{ if eq $root.Release.Service "Tiller" }} clusterIP: - {{- else }} - {{- if $endpoint.clusterIP }} + {{ else }} + {{ if $endpoint.clusterIP }} clusterIP: {{ $endpoint.clusterIP }} {{- end }} {{- end }} @@ -141,7 +116,7 @@ spec: {{- end }} {{- end}} {{- end}} -{{- end}} +{{ end }} --- apiVersion: apps/v1 @@ -222,9 +197,6 @@ spec: labels: {{- include "yugabyte.applabel" ($appLabelArgs) | indent 8 }} {{- include "yugabyte.labels" $root | indent 8 }} - {{- if $root.Values.istioCompatibility.enabled }} - sidecar.istio.io/inject: "true" - {{- end }} {{- if eq .name "yb-masters" }} {{- with $root.Values.master.podLabels }}{{ toYaml . | nindent 8 }}{{ end }} {{- else }} @@ -242,6 +214,7 @@ spec: nodeSelector: {{ toYaml $root.Values.nodeSelector | indent 8 }} {{- end }} + terminationGracePeriodSeconds: 300 {{- if eq .name "yb-masters" }} # yb-masters {{- if $root.Values.master.tolerations }} tolerations: @@ -253,72 +226,50 @@ spec: {{- with $root.Values.tserver.tolerations }}{{ toYaml . | nindent 8 }}{{ end }} {{- end }} {{- end }} - terminationGracePeriodSeconds: 300 affinity: - # Set the anti-affinity selector scope to YB masters and tservers. - {{- $nodeAffinityData := dict}} - {{- if eq .name "yb-masters" -}} - {{- $nodeAffinityData = get $root.Values.master.affinity "nodeAffinity" | default (dict) -}} - {{- else -}} - {{- $nodeAffinityData = get $root.Values.tserver.affinity "nodeAffinity" | default (dict) -}} - {{- end -}} + # Set the anti-affinity selector scope to YB masters. {{ if $root.Values.AZ }} - {{- $userSelectorTerms := dig "requiredDuringSchedulingIgnoredDuringExecution" "nodeSelectorTerms" "" $nodeAffinityData | default (list) -}} - {{- $baseAffinity := include "yugabyte.multiAZNodeAffinity" $root | fromYaml -}} - {{- $requiredSchedule := (list) -}} - {{- if $userSelectorTerms -}} - {{- range $userSelectorTerms -}} - {{- $userTerm := . -}} - {{- range $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}} - {{- $matchExpr := concat .matchExpressions $userTerm.matchExpressions | dict "matchExpressions" -}} - {{- $requiredSchedule = mustMerge $matchExpr $userTerm | append $requiredSchedule -}} - {{- end -}} - {{- end -}} - {{- else -}} - {{- $requiredSchedule = $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}} - {{- end -}} - - {{- with $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution -}} - {{- $_ := set . "nodeSelectorTerms" $requiredSchedule -}} - {{- end -}} - {{- $nodeAffinityData = mustMerge $baseAffinity $nodeAffinityData -}} - {{- end -}} - - {{- $podAntiAffinityData := dict -}} - {{- $basePodAntiAffinity := include "yugabyte.podAntiAffinity" ($appLabelArgs) | fromYaml -}} - {{- if eq .name "yb-masters" -}} - {{- with $root.Values.master.affinity -}} - {{- $userPodAntiAffinity := get . 
"podAntiAffinity" | default (dict) -}} - {{- if $userPodAntiAffinity -}} - {{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}} - {{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}} - {{- end -}} - {{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}} - {{- end -}} - {{- else -}} - {{- with $root.Values.tserver.affinity -}} - {{- $userPodAntiAffinity := get . "podAntiAffinity" | default (dict) -}} - {{- if $userPodAntiAffinity -}} - {{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}} - {{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}} - {{- end -}} - {{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}} - {{- end -}} - {{- end -}} - - {{- if eq .name "yb-masters" -}} - {{- if $nodeAffinityData -}} - {{- $_ := set $root.Values.master.affinity "nodeAffinity" $nodeAffinityData -}} - {{- end -}} - {{- $_ := set $root.Values.master.affinity "podAntiAffinity" $podAntiAffinityData -}} - {{ toYaml $root.Values.master.affinity | nindent 8 }} - {{- else -}} - {{- if $nodeAffinityData -}} - {{- $_ := set $root.Values.tserver.affinity "nodeAffinity" $nodeAffinityData -}} - {{- end -}} - {{- $_ := set $root.Values.tserver.affinity "podAntiAffinity" $podAntiAffinityData -}} - {{ toYaml $root.Values.tserver.affinity | nindent 8 }} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: failure-domain.beta.kubernetes.io/zone + operator: In + values: + - {{ $root.Values.AZ }} + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - {{ $root.Values.AZ }} {{ end }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + {{- if $root.Values.oldNamingStyle }} + - key: app + operator: In + values: + - "{{ .label }}" + {{- else }} + - key: app.kubernetes.io/name + operator: In + values: + - "{{ .label }}" + - key: release + operator: In + values: + - {{ $root.Release.Name | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname + {{- if eq .name "yb-masters" }} + {{- with $root.Values.master.affinity }}{{ toYaml . | nindent 8 }}{{ end }} + {{- else }} + {{- with $root.Values.tserver.affinity }}{{ toYaml . | nindent 8 }}{{ end }} + {{- end }} containers: - name: "{{ .label }}" image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}" @@ -365,20 +316,18 @@ spec: - name: YBDEVOPS_CORECOPY_DIR value: "/mnt/disk0/cores" {{- if eq .name "yb-masters" }} - {{- with $root.Values.master.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }} - {{- $data := dict "secretenv" $root.Values.master.secretEnv "root" $root "suffix" "master"}} - {{- include "yugabyte.addenvsecrets" $data | nindent 8 }} + {{- with $root.Values.master.extraEnv }}{{ toYaml . | nindent 8 }}{{ end }} + {{- with $root.Values.master.secretEnv }}{{ toYaml . | nindent 8 }}{{ end }} {{- else }} - {{- with $root.Values.tserver.extraEnv }}{{ toYaml . 
| nindent 8 }}{{- end }} - {{- $data := dict "secretenv" $root.Values.tserver.secretEnv "root" $root "suffix" "tserver" }} - {{- include "yugabyte.addenvsecrets" $data | nindent 8 }} + {{- with $root.Values.tserver.extraEnv }}{{ toYaml . | nindent 8 }}{{ end }} + {{- with $root.Values.tserver.secretEnv }}{{ toYaml . | nindent 8 }}{{ end }} {{- end }} {{- if and $root.Values.tls.enabled $root.Values.tls.clientToServer (ne .name "yb-masters") }} - name: SSL_CERTFILE value: /root/.yugabytedb/root.crt {{- end }} resources: - {{- if eq .name "yb-masters" }} + {{ if eq .name "yb-masters" }} {{ toYaml $root.Values.resource.master | indent 10 }} {{ else }} {{ toYaml $root.Values.resource.tserver | indent 10 }} @@ -409,13 +358,10 @@ spec: {{- $rpcPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $rpcDict) -}} {{- if $rpcPreflight -}}{{ $rpcPreflight | nindent 12 }}{{ end -}} {{- $broadcastAddr := include "yugabyte.server_broadcast_address" $serviceValues -}} - {{/* skip bind check for servicePerPod multi-cluster, we cannot/don't bind to service IP */}} - {{- if not $root.Values.multicluster.createServicePerPod }} - {{- $broadcastPort := index $service.ports "tcp-rpc-port" -}} - {{- $broadcastDict := dict "Addr" $broadcastAddr "Port" $broadcastPort -}} - {{- $broadcastPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $broadcastDict) -}} - {{- if $broadcastPreflight -}}{{ $broadcastPreflight | nindent 12 }}{{ end -}} - {{- end }} + {{- $broadcastPort := index $service.ports "tcp-rpc-port" -}} + {{- $broadcastDict := dict "Addr" $broadcastAddr "Port" $broadcastPort -}} + {{- $broadcastPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $broadcastDict) -}} + {{- if $broadcastPreflight -}}{{ $broadcastPreflight | nindent 12 }}{{ end -}} {{- $webserverAddr := include "yugabyte.webserver_interface" $serviceValues -}} {{- $webserverPort := index $service.ports "http-ui" -}} {{- $webserverDict := dict "Addr" $webserverAddr "Port" $webserverPort -}} @@ -426,25 +372,6 @@ spec: else k8s_parent="" fi && \ - {{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }} - echo "Creating ephemeral /opt/certs/yugabyte/ as symlink to persisted /mnt/disk0/certs/" && \ - mkdir -p /mnt/disk0/certs && \ - mkdir -p /opt/certs && \ - ln -s /mnt/disk0/certs /opt/certs/yugabyte && \ - if [[ ! -f /opt/certs/yugabyte/ca.crt ]]; then - echo "Fresh install of /opt/certs/yugabyte/ca.crt" - cp /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt; - fi && \ - cmp -s /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt;sameRootCA=$? 
&& \ - if [[ $sameRootCA -eq 0 ]]; then - echo "Refreshing tls certs at /opt/certs/yugabyte/"; - cp /home/yugabyte/cert-manager/tls.crt /opt/certs/yugabyte/node.{{$rpcAddr}}.crt; - cp /home/yugabyte/cert-manager/tls.key /opt/certs/yugabyte/node.{{$rpcAddr}}.key; - chmod 600 /opt/certs/yugabyte/* - else - echo "WARNING: Not refreshing certificates as the root ca.crt has changed" - fi && \ - {{- end }} {{- if eq .name "yb-masters" }} exec ${k8s_parent} /home/yugabyte/bin/yb-master \ {{- if not $root.Values.storage.ephemeral }} @@ -548,18 +475,10 @@ spec: {{- end }} ports: {{- range $label, $port := .ports }} - {{- if not (eq $label "grpc-ybc-port") }} - containerPort: {{ $port }} name: {{ $label | quote }} - {{- end }} {{- end}} volumeMounts: - {{- if (eq .name "yb-tservers") }} - - name: tserver-tmp - mountPath: /tmp - {{- end }} - - name: debug-hooks-volume - mountPath: /opt/debug_hooks_config {{ if not $root.Values.storage.ephemeral }} {{- range $index := until (int ($storageInfo.count)) }} - name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }} @@ -568,7 +487,7 @@ spec: {{- end }} {{- if $root.Values.tls.enabled }} - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} - mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }} + mountPath: /opt/certs/yugabyte readOnly: true - name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} mountPath: /root/.yugabytedb/ @@ -609,78 +528,7 @@ spec: subPath: cores {{- end }} - {{- if and (eq .name "yb-tservers") ($root.Values.ybc.enabled) }} - - name: yb-controller - image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}" - imagePullPolicy: {{ $root.Values.Image.pullPolicy }} - lifecycle: - postStart: - exec: - command: - - "bash" - - "-c" - - > - mkdir -p /mnt/disk0/yw-data/controller/tmp; - mkdir -p /mnt/disk0/yw-data/controller/conf; - mkdir -p /mnt/disk0/ybc-data/controller/logs; - mkdir -p /tmp/yugabyte/controller; - ln -sf /mnt/disk0/ybc-data/controller/logs /tmp/yugabyte/controller; - ln -sf /mnt/disk0/yw-data/controller/bin /tmp/yugabyte/controller; - rm -f /tmp/yugabyte/controller/yb-controller.pid; - {{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }} - mkdir -p /opt/certs; - ln -sf /mnt/disk0/certs /opt/certs/yugabyte; - {{- end }} - command: - - "/sbin/tini" - - "--" - args: - - "/bin/bash" - - "-c" - - > - while true; do - sleep 60; - /home/yugabyte/tools/k8s_ybc_parent.py status || /home/yugabyte/tools/k8s_ybc_parent.py start; - done - {{- with index $service.ports "grpc-ybc-port" }} - ports: - - containerPort: {{ . 
}} - name: "grpc-ybc-port" - {{- end }} - volumeMounts: - - name: tserver-tmp - mountPath: /tmp - {{- if not $root.Values.storage.ephemeral }} - {{- range $index := until (int ($storageInfo.count)) }} - - name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }} - mountPath: /mnt/disk{{ $index }} - {{- end }} - {{- end }} - {{- if $root.Values.tls.enabled }} - - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} - mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }} - readOnly: true - {{- end }} - {{- if ($root.Values.tserver.extraVolumeMounts) -}} - {{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}} - {{- $root.Values.tserver.extraVolumeMounts | toYaml | nindent 10 -}} - {{- end -}} - {{- end}} - volumes: - {{- if (eq .name "yb-masters") }} - - name: debug-hooks-volume - configMap: - name: {{ include "yugabyte.fullname" $root }}-master-hooks - defaultMode: 0755 - {{- else if (eq .name "yb-tservers") }} - - name: debug-hooks-volume - configMap: - name: {{ include "yugabyte.fullname" $root }}-tserver-hooks - defaultMode: 0755 - - name: tserver-tmp - emptyDir: {} - {{- end }} {{ if not $root.Values.storage.ephemeral }} {{- range $index := until (int ($storageInfo.count)) }} - name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }} @@ -689,24 +537,25 @@ spec: {{- end }} {{- end }} {{- if $root.Values.tls.enabled }} - {{- if $root.Values.tls.certManager.enabled }} - {{- /* certManager enabled */}} - - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} - projected: - sources: - {{- if not $root.Values.tls.certManager.bootstrapSelfsigned }} - - secret: - name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }} - {{- end }} - - secret: - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} - {{- else }} - {{/* certManager disabled */}} - name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} secret: secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }} + {{- if $root.Values.tls.certManager.enabled }} + items: + {{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}} + {{- range $index := until ( int ( $replicas ) ) }} + {{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }} + {{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }} + {{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }} + - key: tls.crt + path: node.{{$node}}.crt + - key: tls.key + path: node.{{$node}}.key + {{- end }} + - key: ca.crt + path: ca.crt + {{- end }} defaultMode: 256 - {{- end }} - name: {{ $root.Values.oldNamingStyle 
| ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} secret: secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }} diff --git a/charts/yugabyte/yugabyte/values.yaml b/charts/yugabyte/yugabyte/values.yaml index a93f26ba0..6cd0ad11d 100644 --- a/charts/yugabyte/yugabyte/values.yaml +++ b/charts/yugabyte/yugabyte/values.yaml @@ -2,15 +2,10 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. Component: "yugabytedb" - -fullnameOverride: "" -nameOverride: "" - Image: repository: "yugabytedb/yugabyte" - tag: 2.18.2.1-b1 + tag: 2.14.12.0-b19 pullPolicy: IfNotPresent - pullSecretName: "" storage: ephemeral: false # will not allocate PVs when true @@ -26,38 +21,27 @@ storage: resource: master: requests: - cpu: "2" + cpu: 2 memory: 2Gi limits: - cpu: "2" + cpu: 2 memory: 2Gi tserver: requests: - cpu: "2" + cpu: 2 memory: 4Gi limits: - cpu: "2" + cpu: 2 memory: 4Gi replicas: master: 3 tserver: 3 - ## Used to set replication factor when isMultiAz is set to true - totalMasters: 3 partition: master: 0 tserver: 0 -# Used in Multi-AZ setup -masterAddresses: "" - -isMultiAz: false -AZ: "" - -# Disable the YSQL -disableYsql: false - tls: # Set to true to enable the TLS. enabled: false @@ -68,33 +52,25 @@ tls: # Set enabled to true to use cert-manager instead of providing your own rootCA certManager: enabled: false - # Will create own ca certificate and issuer when set to true - bootstrapSelfsigned: true - # Use ClusterIssuer when set to true, otherwise use Issuer + # Will create own ca certificate and issuer when set to false useClusterIssuer: false - # Name of ClusterIssuer to use when useClusterIssuer is true + # ignored when useClusterIssuer is false clusterIssuer: cluster-ca - # Name of Issuer to use when useClusterIssuer is false - issuer: yugabyte-ca certificates: # The lifetime before cert-manager will issue a new certificate. # The re-issued certificates will not be automatically reloaded by the service. # It is necessary to provide some external means of restarting the pods. duration: 2160h # 90d renewBefore: 360h # 15d - algorithm: RSA # ECDSA or RSA - # Can be 2048, 4096 or 8192 for RSA + algorithm: ECDSA # ECDSA or RSA + # Can be 2046, 4096 or 8192 for RSA # Or 256, 384 or 521 for ECDSA - keySize: 2048 + keySize: 521 - ## When certManager.enabled=false, rootCA.cert and rootCA.key are used to generate TLS certs. - ## When certManager.enabled=true and boostrapSelfsigned=true, rootCA is ignored. - ## When certManager.enabled=true and bootstrapSelfsigned=false, only rootCA.cert is used - ## to verify TLS certs generated and signed by the external provider. 
+ # Will be ignored when certManager.enabled=true rootCA: cert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFXTVJRd0VnWURWUVFERXd0WmRXZGgKWW5sMFpTQkVRakFlRncweE9UQXlNRGd3TURRd01qSmFGdzB5T1RBeU1EVXdNRFF3TWpKYU1CWXhGREFTQmdOVgpCQU1UQzFsMVoyRmllWFJsSUVSQ01JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCnVOMWF1aWc4b2pVMHM0OXF3QXhrT2FCaHkwcTlyaVg2akVyZWJyTHJOWDJOeHdWQmNVcWJkUlhVc3VZNS96RUQKUC9CZTNkcTFuMm9EQ2ZGVEwweGkyNFdNZExRcnJBMndCdzFtNHM1WmQzcEJ1U04yWHJkVVhkeUx6dUxlczJNbgovckJxcWRscXp6LzAyTk9TOE9SVFZCUVRTQTBSOFNMQ1RjSGxMQmRkMmdxZ1ZmemVXRlVObXhWQ2EwcHA5UENuCmpUamJJRzhJWkh5dnBkTyt3aURQM1Y1a1ZEaTkvbEtUaGUzcTFOeDg5VUNFcnRJa1pjSkYvWEs3aE90MU1sOXMKWDYzb2lVMTE1Q2svbGFGRjR6dWgrZk9VenpOVXRXeTc2RE92cm5pVGlaU0tQZDBBODNNa2l2N2VHaDVkV3owWgpsKzJ2a3dkZHJaRzVlaHhvbGhGS3pRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQXFRd0hRWURWUjBsCkJCWXdGQVlJS3dZQkJRVUhBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFEQjVRbmlYd1ptdk52eG5VbS9sTTVFbms3VmhTUzRUZldIMHY4Q0srZWZMSVBTbwpVTkdLNXU5UzNEUWlvaU9SN1Vmc2YrRnk1QXljMmNUY1M2UXBxTCt0V1QrU1VITXNJNk9oQ05pQ1gvQjNKWERPCkd2R0RIQzBVOHo3aWJTcW5zQ2Rid05kajAyM0lwMHVqNE9DVHJ3azZjd0RBeXlwVWkwN2tkd28xYWJIWExqTnAKamVQMkwrY0hkc2dKM1N4WWpkK1kvei9IdmFrZG1RZDJTL1l2V0R3aU1SRDkrYmZXWkJVRHo3Y0QyQkxEVmU0aAp1bkFaK3NyelR2Sjd5dkVodzlHSDFyajd4Qm9VNjB5SUUrYSszK2xWSEs4WnBSV0NXMnh2eWNrYXJSKytPS2NKClFsL04wWExqNWJRUDVoUzdhOTdhQktTamNqY3E5VzNGcnhJa2tKST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" key: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdU4xYXVpZzhvalUwczQ5cXdBeGtPYUJoeTBxOXJpWDZqRXJlYnJMck5YMk54d1ZCCmNVcWJkUlhVc3VZNS96RURQL0JlM2RxMW4yb0RDZkZUTDB4aTI0V01kTFFyckEyd0J3MW00czVaZDNwQnVTTjIKWHJkVVhkeUx6dUxlczJNbi9yQnFxZGxxenovMDJOT1M4T1JUVkJRVFNBMFI4U0xDVGNIbExCZGQyZ3FnVmZ6ZQpXRlVObXhWQ2EwcHA5UENualRqYklHOElaSHl2cGRPK3dpRFAzVjVrVkRpOS9sS1RoZTNxMU54ODlVQ0VydElrClpjSkYvWEs3aE90MU1sOXNYNjNvaVUxMTVDay9sYUZGNHp1aCtmT1V6ek5VdFd5NzZET3ZybmlUaVpTS1BkMEEKODNNa2l2N2VHaDVkV3owWmwrMnZrd2RkclpHNWVoeG9saEZLelFJREFRQUJBb0lCQUJsdW1tU3gxR1djWER1Mwpwei8wZEhWWkV4c2NsU3U0SGRmZkZPcTF3cFlCUjlmeGFTZGsxQzR2YXF1UjhMaWl6WWVtVWViRGgraitkSnlSCmpwZ2JNaDV4S1BtRkw5empwU3ZUTkN4UHB3OUF5bm5sM3dyNHZhcU1CTS9aZGpuSGttRC9kQzBadEEvL0JIZ3YKNHk4d3VpWCsvUWdVaER0Z1JNcmR1ZUZ1OVlKaFo5UE9jYXkzSkkzMFhEYjdJSS9vNFNhYnhTcFI3bTg5WjY0NwpUb3hsOEhTSzl0SUQxbkl1bHVpTmx1dHI1RzdDdE93WTBSc2N5dmZ2elg4a1d2akpLZVJVbmhMSCtXVFZOaExICjdZc0tMNmlLa1NkckMzeWVPWnV4R0pEbVdrZVgxTzNPRUVGYkc4TjVEaGNqL0lXbDh1dGt3LzYwTEthNHBCS2cKTXhtNEx3RUNnWUVBNnlPRkhNY2pncHYxLzlHZC8yb3c2YmZKcTFjM1dqQkV2cnM2ZXNyMzgrU3UvdVFneXJNcAo5V01oZElpb2dYZjVlNjV5ZlIzYVBXcjJJdWMxZ0RUNlYycDZFR2h0NysyQkF1YkIzczloZisycVNRY1lkS3pmCnJOTDdKalE4ZEVGZWdYd041cHhKOTRTTVFZNEI4Qm9hOHNJWTd3TzU4dHpVMjZoclVnanFXQ1VDZ1lFQXlVUUIKNzViWlh6MGJ5cEc5NjNwYVp0bGlJY0cvUk1XMnVPOE9rVFNYSGdDSjBob25uRm5IMGZOc1pGTHdFWEtnTTRORworU3ZNbWtUekE5eVVSMHpIMFJ4UW44L1YzVWZLT2k5RktFeWx6NzNiRkV6ZW1QSEppQm12NWQ4ZTlOenZmU0E0CkdpRTYrYnFyV3VVWWRoRWlYTnY1SFNPZ3I4bUx1TzJDbGlmNTg0a0NnWUFlZzlDTmlJWmlOODAzOHNNWFYzZWIKalI5ZDNnYXY3SjJ2UnVyeTdvNDVGNDlpUXNiQ3AzZWxnY1RnczY5eWhkaFpwYXp6OGNEVndhREpyTW16cHF4cQpWY1liaFFIblppSWM5MGRubS9BaVF2eWJWNUZqNnQ5b05VVWtreGpaV1haalJXOGtZMW55QmtDUmJWVnhER0k4CjZOV0ZoeTFGaUVVVGNJcms3WVZFQlFLQmdRREpHTVIrYWRFamtlRlUwNjVadkZUYmN0VFVPY3dzb1Foalc2akkKZVMyTThxakNYeE80NnhQMnVTeFNTWFJKV3FpckQ3NDRkUVRvRjRCaEdXS21veGI3M3pqSGxWaHcwcXhDMnJ4VQorZENxODE0VXVJR3BlOTBMdWU3QTFlRU9kRHB1WVdUczVzc1FmdTE3MG5CUWQrcEhzaHNFZkhhdmJjZkhyTGpQCjQzMmhVUUtCZ1FDZ3hMZG5Pd2JMaHZLVkhhdTdPVXQxbGpUT240SnB5bHpnb3hFRXpzaDhDK0ZKUUQ1bkFxZXEKZUpWSkNCd2VkallBSD
R6MUV3cHJjWnJIN3IyUTBqT2ZFallwU1dkZGxXaWh4OTNYODZ0aG83UzJuUlYrN1hNcQpPVW9ZcVZ1WGlGMWdMM1NGeHZqMHhxV3l0d0NPTW5DZGFCb0M0Tkw3enJtL0lZOEUwSkw2MkE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=" - ## When tls.certManager.enabled=false ## nodeCert and clientCert will be used only when rootCA.key is empty. ## Will be ignored and genSignedCert will be used to generate ## node and client certs if rootCA.key is provided. @@ -109,38 +85,33 @@ tls: gflags: master: default_memory_limit_to_ram_ratio: 0.85 - tserver: {} +# tserver: # use_cassandra_authentication: false PodManagementPolicy: Parallel enableLoadBalancer: true -ybc: - enabled: false +isMultiAz: false domainName: "cluster.local" serviceEndpoints: - name: "yb-master-ui" type: LoadBalancer - annotations: {} - clusterIP: "" ## Sets the Service's externalTrafficPolicy - externalTrafficPolicy: "" + # externalTrafficPolicy: "" app: "yb-master" - loadBalancerIP: "" + # loadBalancerIP: "" ports: http-ui: "7000" - name: "yb-tserver-service" type: LoadBalancer - annotations: {} - clusterIP: "" ## Sets the Service's externalTrafficPolicy - externalTrafficPolicy: "" + # externalTrafficPolicy: "" app: "yb-tserver" - loadBalancerIP: "" + # loadBalancerIP: "" ports: tcp-yql-port: "9042" tcp-yedis-port: "6379" @@ -167,11 +138,8 @@ Services: http-ycql-met: "12000" http-yedis-met: "11000" http-ysql-met: "13000" - grpc-ybc-port: "18018" - -## Should be set to true only if Istio is being used. This also adds -## the Istio sidecar injection labels to the pods. +## Should be set to true only if Istio is being used. ## TODO: remove this once ## https://github.com/yugabyte/yugabyte-db/issues/5641 is fixed. ## @@ -188,22 +156,6 @@ multicluster: ## failover. Useful when using new naming style. createCommonTserverService: false - ## Enable it to deploy YugabyteDB in a multi-cluster services enabled - ## Kubernetes cluster (KEP-1645). This will create ServiceExport. - ## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export - ## You can use this gist for the reference to deploy the YugabyteDB in a multi-cluster scenario. - ## Gist - https://gist.github.com/baba230896/78cc9bb6f4ba0b3d0e611cd49ed201bf - createServiceExports: false - - ## Mandatory variable when createServiceExports is set to true. - ## Use: In case of GKE, you need to pass GKE Hub Membership Name. - ## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#enabling - kubernetesClusterId: "" - - ## mcsApiVersion is used for the MCS resources created by the - ## chart. Set to net.gke.io/v1 when using GKE MCS. - mcsApiVersion: "multicluster.x-k8s.io/v1alpha1" - serviceMonitor: ## If true, two ServiceMonitor CRs are created. One for yb-master ## and one for yb-tserver @@ -279,22 +231,9 @@ affinity: {} statefulSetAnnotations: {} -networkAnnotation: {} - -commonLabels: {} - master: ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core ## This might override the default affinity from service.yaml - # To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernentes - # has. Each new node selector term is ORed together, and each match expression or match field in - # a single selector is ANDed together. - # This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value - # 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity - # terms. - # - # Pod anti affinity is a simpler merge. 
Each term is applied separately, and the weight is tracked. - # The pod that achieves the highest weight is selected. ## Example. # affinity: # podAntiAffinity: @@ -306,8 +245,6 @@ master: # values: # - "yb-master" # topologyKey: kubernetes.io/hostname - # - # For further examples, see examples/yugabyte/affinity_overrides.yaml affinity: {} ## Extra environment variables passed to the Master pods. @@ -368,15 +305,6 @@ master: tserver: ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core ## This might override the default affinity from service.yaml - # To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernentes - # has. Each new node selector term is ORed together, and each match expression or match field in - # a single selector is ANDed together. - # This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value - # 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity - # terms. - # - # Pod anti affinity is a simpler merge. Each term is applied separately, and the weight is tracked. - # The pod that achieves the highest weight is selected. ## Example. # affinity: # podAntiAffinity: @@ -388,7 +316,6 @@ tserver: # values: # - "yb-tserver" # topologyKey: kubernetes.io/hostname - # For further examples, see examples/yugabyte/affinity_overrides.yaml affinity: {} ## Extra environment variables passed to the TServer pods. @@ -401,16 +328,13 @@ tserver: # fieldPath: status.hostIP extraEnv: [] - ## secretEnv variables are used to expose secrets data as env variables in the tserver pods. - ## If namespace field is not specified we assume that user already - ## created the secret in the same namespace as DB pods. - ## Example + # secretEnv variables are used to expose secrets data as env variables in the tserver pods. + # TODO Add namespace also to support copying secrets from other namespace. # secretEnv: # - name: MYSQL_LDAP_PASSWORD # valueFrom: # secretKeyRef: # name: secretName - # namespace: my-other-namespace-with-ldap-secret # key: password secretEnv: [] diff --git a/charts/yugabyte/yugaware/Chart.yaml b/charts/yugabyte/yugaware/Chart.yaml index 7751fdee8..81be36a18 100644 --- a/charts/yugabyte/yugaware/Chart.yaml +++ b/charts/yugabyte/yugaware/Chart.yaml @@ -3,20 +3,15 @@ annotations: catalog.cattle.io/display-name: YugabyteDB Anywhere catalog.cattle.io/kube-version: '>=1.18-0' catalog.cattle.io/release-name: yugaware - charts.openshift.io/name: yugaware -apiVersion: v2 -appVersion: 2.18.2.1-b1 -description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring - for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB cluster - with multiple pods provided by Kubernetes or OpenShift and logically grouped together - to form one logical distributed database. +apiVersion: v1 +appVersion: 2.14.12.0-b19 +description: YugaWare is YugaByte Database's Orchestration and Management console. 
home: https://www.yugabyte.com icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4 -kubeVersion: '>=1.18-0' maintainers: -- email: sanketh@yugabyte.com - name: Sanketh Indarapu -- email: gjalla@yugabyte.com - name: Govardhan Reddy Jalla +- email: ram@yugabyte.com + name: Ram Sri +- email: arnav@yugabyte.com + name: Arnav Agarwal name: yugaware -version: 2.18.2+1 +version: 2.14.12 diff --git a/charts/yugabyte/yugaware/README.md b/charts/yugabyte/yugaware/README.md index 0d190c0be..fa27ce3e0 100644 --- a/charts/yugabyte/yugaware/README.md +++ b/charts/yugabyte/yugaware/README.md @@ -1,7 +1,5 @@ YugabyteDB Anywhere gives you the simplicity and support to deliver a private database-as-a-service (DBaaS) at scale. Use YugabyteDB Anywhere to deploy YugabyteDB across any cloud anywhere in the world with a few clicks, simplify day 2 operations through automation, and get the services needed to realize business outcomes with the database. -YugabyteDB Anywhere can be deployed using this Helm chart. Detailed documentation is available at: -- [Install YugabyteDB Anywhere software - Kubernetes](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes/) -- [Install YugabyteDB Anywhere software - OpenShift (Helm based)](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/openshift/#helm-based-installation) +YugabyteDB Anywhere can be deployed using this helm chart. Detailed documentation is available at [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/yugabyte)](https://artifacthub.io/packages/search?repo=yugabyte) diff --git a/charts/yugabyte/yugaware/openshift.values.yaml b/charts/yugabyte/yugaware/openshift.values.yaml deleted file mode 100644 index f156a5535..000000000 --- a/charts/yugabyte/yugaware/openshift.values.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# OCP compatible values for yugaware - -image: - - repository: quay.io/yugabyte/yugaware-ubi - - postgres: - registry: registry.redhat.io - tag: 1-88.1661531722 - name: rhscl/postgresql-13-rhel7 - - prometheus: - registry: registry.redhat.io - tag: v4.11.0 - name: openshift4/ose-prometheus - - nginx: - registry: registry.access.redhat.com - tag: 1-60.1665590917 - name: ubi8/nginx-120 - -rbac: - create: false - -ocpCompatibility: - enabled: true - -securityContext: - enabled: false diff --git a/charts/yugabyte/yugaware/questions.yaml b/charts/yugabyte/yugaware/questions.yaml index 904b9cf75..0f2a48dd3 100644 --- a/charts/yugabyte/yugaware/questions.yaml +++ b/charts/yugabyte/yugaware/questions.yaml @@ -15,7 +15,7 @@ questions: label: Yugabyte Platform image repository description: "Yugabyte Platform image repository" - variable: image.tag - default: "2.5.1.0-b153" + default: "2.14.1.0-b36" required: false type: string label: Yugabyte Platform image tag diff --git a/charts/yugabyte/yugaware/templates/_default_values.tpl b/charts/yugabyte/yugaware/templates/_default_values.tpl deleted file mode 100644 index 95ccbdb47..000000000 --- a/charts/yugabyte/yugaware/templates/_default_values.tpl +++ /dev/null @@ -1,18 +0,0 @@ -{{/* - The usage of helm upgrade [RELEASE] [CHART] --reuse-values --set [variable]:[value] throws an - error in the event that new entries are inserted to the values chart. - - This is because reuse-values flag uses the values from the last release. 
If --set (/--set-file/ - --set-string/--values/-f) is applied with the reuse-values flag, the values from the last - release are overridden for those variables alone, and newer changes to the chart are - unacknowledged. - - https://medium.com/@kcatstack/understand-helm-upgrade-flags-reset-values-reuse-values-6e58ac8f127e - - To prevent errors while applying upgrade with --reuse-values and --set flags after introducing - new variables, default values can be specified in this file. -*/}} - -{{- define "get_nginx_proxyReadTimeoutSec" -}} - {{ .Values.nginx.proxyReadTimeoutSec | default 600 }} -{{- end -}} diff --git a/charts/yugabyte/yugaware/templates/_helpers.tpl b/charts/yugabyte/yugaware/templates/_helpers.tpl index 232797171..ffe8e65cf 100644 --- a/charts/yugabyte/yugaware/templates/_helpers.tpl +++ b/charts/yugabyte/yugaware/templates/_helpers.tpl @@ -134,100 +134,3 @@ Make list of allowed CORS origins {{- end -}} ] {{- end -}} - -{{/* -Get or generate server cert and key -*/}} -{{- define "getOrCreateServerCert" -}} -{{- $root := .Root -}} -{{- if and $root.Values.tls.certificate $root.Values.tls.key -}} -server.key: {{ $root.Values.tls.key }} -server.crt: {{ $root.Values.tls.certificate }} -{{- else -}} - {{- $result := (lookup "v1" "Secret" .Namespace .Name).data -}} - {{- if $result -}} -server.key: {{ index $result "server.key" }} -server.crt: {{ index $result "server.crt" }} - {{- else -}} - {{- $cert := genSelfSignedCert $root.Values.tls.hostname nil nil 3560 -}} -server.key: {{ $cert.Key | b64enc }} -server.crt: {{ $cert.Cert | b64enc }} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Get or generate server key cert in pem format -*/}} -{{- define "getOrCreateServerPem" -}} -{{- $root := .Root -}} -{{- if and $root.Values.tls.certificate $root.Values.tls.key -}} -{{- $decodedKey := $root.Values.tls.key | b64dec -}} -{{- $decodedCert := $root.Values.tls.certificate | b64dec -}} -{{- $serverPemContentTemp := ( printf "%s\n%s" $decodedKey $decodedCert ) -}} -{{- $serverPemContent := $serverPemContentTemp | b64enc -}} -server.pem: {{ $serverPemContent }} -{{- else -}} - {{- $result := (lookup "v1" "Secret" .Namespace .Name).data -}} - {{- if $result -}} -{{- $serverPemContent := ( index $result "server.pem" ) -}} -server.pem: {{ $serverPemContent }} - {{- else -}} - {{- $cert := genSelfSignedCert $root.Values.tls.hostname nil nil 3560 -}} -{{- $serverPemContentTemp := ( printf "%s\n%s" $cert.Key $cert.Cert ) -}} -{{- $serverPemContent := $serverPemContentTemp | b64enc -}} -server.pem: {{ $serverPemContent }} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Check export of nss_wrapper environment variables required -*/}} -{{- define "checkNssWrapperExportRequired" -}} - {{- if .Values.securityContext.enabled -}} - {{- if and (ne (int .Values.securityContext.runAsUser) 0) (ne (int .Values.securityContext.runAsUser) 10001) -}} - {{- printf "true" -}} - {{- end -}} - {{- else -}} - {{- printf "false" -}} - {{- end -}} -{{- end -}} - - -{{/* - Verify the extraVolumes and extraVolumeMounts mappings. - Every extraVolumes should have extraVolumeMounts -*/}} -{{- define "yugaware.isExtraVolumesMappingExists" -}} - {{- $lenExtraVolumes := len .extraVolumes -}} - {{- $lenExtraVolumeMounts := len .extraVolumeMounts -}} - - {{- if and (eq $lenExtraVolumeMounts 0) (gt $lenExtraVolumes 0) -}} - {{- fail "You have not provided the extraVolumeMounts for extraVolumes." 
-}} - {{- else if and (eq $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}} - {{- fail "You have not provided the extraVolumes for extraVolumeMounts." -}} - {{- else if and (gt $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}} - {{- $volumeMountsList := list -}} - {{- range .extraVolumeMounts -}} - {{- $volumeMountsList = append $volumeMountsList .name -}} - {{- end -}} - - {{- $volumesList := list -}} - {{- range .extraVolumes -}} - {{- $volumesList = append $volumesList .name -}} - {{- end -}} - - {{- range $volumesList -}} - {{- if not (has . $volumeMountsList) -}} - {{- fail (printf "You have not provided the extraVolumeMounts for extraVolume %s" .) -}} - {{- end -}} - {{- end -}} - - {{- range $volumeMountsList -}} - {{- if not (has . $volumesList) -}} - {{- fail (printf "You have not provided the extraVolumes for extraVolumeMounts %s" .) -}} - {{- end -}} - {{- end -}} - {{- end -}} -{{- end -}} diff --git a/charts/yugabyte/yugaware/templates/certificates.yaml b/charts/yugabyte/yugaware/templates/certificates.yaml deleted file mode 100644 index ff4b7021a..000000000 --- a/charts/yugabyte/yugaware/templates/certificates.yaml +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) YugaByte, Inc. - -{{- $root := . }} -{{- $tls := $root.Values.tls }} -{{- if and $tls.enabled $tls.certManager.enabled }} -{{- if $tls.certManager.genSelfsigned }} -{{- if $tls.certManager.useClusterIssuer }} ---- -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: {{ $root.Release.Name }}-yugaware-cluster-issuer -spec: - selfSigned: {} -{{- else }} # useClusterIssuer=false ---- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: {{ $root.Release.Name }}-yugaware-issuer - namespace: {{ $root.Release.Namespace }} -spec: - selfSigned: {} ---- -{{- end }} # useClusterIssuer ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: {{ $root.Release.Name }}-yugaware-ui-root-ca - namespace: {{ $root.Release.Namespace }} -spec: - isCA: true - commonName: Yugaware self signed CA - secretName: {{ .Release.Name }}-yugaware-root-ca - secretTemplate: - labels: - app: "{{ template "yugaware.name" . }}" - chart: "{{ template "yugaware.chart" . }}" - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - duration: {{ $tls.certManager.configuration.duration | quote }} - renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }} - privateKey: - algorithm: {{ $tls.certManager.configuration.algorithm | quote }} - encoding: PKCS8 - size: {{ $tls.certManager.configuration.keySize }} - rotationPolicy: Always - issuerRef: - {{- if $tls.certManager.useClusterIssuer }} - name: {{ $root.Release.Name }}-yugaware-cluster-issuer - kind: ClusterIssuer - {{- else }} - name: {{ $root.Release.Name }}-yugaware-issuer - kind: Issuer - {{- end }} ---- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: {{ $root.Release.Name }}-yugaware-ca-issuer - namespace: {{ $root.Release.Namespace }} -spec: - ca: - secretName: {{ .Release.Name }}-yugaware-root-ca ---- -{{- end }} # genSelfsigned ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: {{ $root.Release.Name }}-yugaware-ui-tls - namespace: {{ $root.Release.Namespace }} -spec: - isCA: false - commonName: {{ $tls.hostname }} - secretName: {{ .Release.Name }}-yugaware-tls-cert - secretTemplate: - labels: - app: "{{ template "yugaware.name" . }}" - chart: "{{ template "yugaware.chart" . 
}}" - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} - duration: {{ $tls.certManager.configuration.duration | quote }} - renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }} - privateKey: - algorithm: {{ $tls.certManager.configuration.algorithm | quote }} - encoding: PKCS8 - size: {{ $tls.certManager.configuration.keySize }} - rotationPolicy: Always - issuerRef: - name: {{ $tls.certManager.genSelfsigned | ternary (printf "%s%s" $root.Release.Name "-yugaware-ca-issuer") ($tls.certManager.useClusterIssuer | ternary $tls.certManager.clusterIssuer $tls.certManager.issuer) }} - {{- if $tls.certManager.useClusterIssuer }} - kind: ClusterIssuer - {{- else }} - kind: Issuer - {{- end }} ---- -{{- end }} diff --git a/charts/yugabyte/yugaware/templates/configs.yaml b/charts/yugabyte/yugaware/templates/configs.yaml index 6c9cd550e..76b2f80cf 100644 --- a/charts/yugabyte/yugaware/templates/configs.yaml +++ b/charts/yugabyte/yugaware/templates/configs.yaml @@ -31,40 +31,28 @@ data: log.override.path = "/opt/yugabyte/yugaware/data/logs" db { - default.dbname=${POSTGRES_DB} {{ if .Values.postgres.external.host }} default.host="{{ .Values.postgres.external.host }}" default.port={{ .Values.postgres.external.port }} + default.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${POSTGRES_DB}${db.default.params} {{ else if eq .Values.ip_version_support "v6_only" }} - default.host="[::1]" + default.host="::1" + default.url="jdbc:postgresql://[::1]:"${db.default.port}"/"${POSTGRES_DB}${db.default.params} {{ else }} default.host="127.0.0.1" + default.url="jdbc:postgresql://127.0.0.1:"${db.default.port}"/"${POSTGRES_DB}${db.default.params} {{ end }} - default.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params} default.params="{{ .Values.jdbcParams }}" + default.driver=org.postgresql.Driver default.username=${POSTGRES_USER} default.password=${POSTGRES_PASSWORD} - {{ if .Values.yugaware.cloud.enabled }} - perf_advisor.driver="org.hsqldb.jdbc.JDBCDriver" - perf_advisor.url="jdbc:hsqldb:mem:perf-advisor" - perf_advisor.createDatabaseIfMissing=false - perf_advisor.username="sa" - perf_advisor.password="sa" - perf_advisor.migration.auto=false - perf_advisor.migration.disabled=true - {{ else }} - perf_advisor.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.perf_advisor.dbname}${db.default.params} - perf_advisor.createDatabaseUrl="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params} - {{ end }} + default.logStatements=true + default.migration.initOnMigrate=true + default.migration.auto=true } - - {{- if and (not .Values.useNginxProxy) (.Values.tls.enabled) }} - https.port = 9443 - play.server.https.keyStore { - path = /opt/certs/server.pem - type = PEM + ebean { + default = ["com.yugabyte.yw.models.*"] } - {{- end }} yb { {{- if .Values.yugaware.universe_boot_script }} @@ -98,8 +86,6 @@ data: kubernetes.storageClass = "{{ .Values.yugaware.storageClass }}" kubernetes.pullSecretName = "{{ .Values.image.pullSecret }}" url = "https://{{ .Values.tls.hostname }}" - # GKE MCS takes 7 to 10 minutes to setup DNS - wait_for_server_timeout = 15 minutes } play.filters { @@ -140,8 +126,7 @@ data: {{- range $key, $value := .Values.additionalAppConf.nonStringConf }} {{ $key }} = {{ $value }} {{- end }} -{{- if and .Values.tls.enabled (not .Values.tls.certManager.enabled) }} -{{- if .Values.useNginxProxy }} +{{- if .Values.tls.enabled }} --- 
apiVersion: v1 kind: Secret @@ -154,27 +139,10 @@ metadata: heritage: {{ .Release.Service | quote }} type: Opaque data: -{{- include "getOrCreateServerCert" (dict "Namespace" .Release.Namespace "Root" . "Name" (printf "%s%s" .Release.Name "-yugaware-tls-cert")) | nindent 2 }} + server.crt: {{ .Values.tls.certificate }} + server.key: {{ .Values.tls.key }} {{- end }} -{{ if not .Values.useNginxProxy }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Release.Name }}-yugaware-tls-pem - labels: - app: "{{ template "yugaware.name" . }}" - chart: "{{ template "yugaware.chart" . }}" - release: {{ .Release.Name | quote }} - heritage: {{ .Release.Service | quote }} -type: Opaque -data: -{{- include "getOrCreateServerPem" (dict "Namespace" .Release.Namespace "Root" . "Name" (printf "%s%s" .Release.Name "-yugaware-tls-pem")) | nindent 2 }} -{{ end }} -{{- end }} - -{{- if .Values.useNginxProxy }} --- apiVersion: v1 kind: ConfigMap @@ -193,7 +161,7 @@ data: listen {{ eq .Values.ip_version_support "v6_only" | ternary "[::]:8080" "8080" }}; server_name {{ .Values.tls.hostname }}; return 301 https://$host$request_uri; - } + } {{- end }} server { @@ -212,7 +180,7 @@ data: {{- end }} proxy_http_version 1.1; proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Host $host; @@ -230,9 +198,7 @@ data: client_max_body_size {{ .Values.nginx.upload_size }}; } } -{{ end }} --- -{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }} apiVersion: v1 kind: ConfigMap metadata: @@ -258,26 +224,6 @@ data: docker-upgrade pg_upgrade | tee -a /pg_upgrade_logs/pg_upgrade_11_to_14.log; echo "host all all all scram-sha-256" >> "${PGDATANEW}/pg_hba.conf"; fi -{{- end }} -{{- if .Values.securityContext.enabled }} ---- -apiVersion: "v1" -kind: ConfigMap -metadata: - name: {{ .Release.Name }}-yugaware-pg-prerun - labels: - app: {{ template "yugaware.name" . }} - chart: {{ template "yugaware.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }} -data: - pg-prerun.sh: | - #!/bin/bash - set -x -o errexit - - mkdir -p $PGDATA && chown -R $PG_UID:$PG_GID $PGDATA; -{{- end }} -{{- if .Values.useNginxProxy }} --- apiVersion: v1 kind: ConfigMap @@ -306,8 +252,6 @@ data: uwsgi_temp_path /tmp/uwsgi_temp; scgi_temp_path /tmp/scgi_temp; - proxy_read_timeout {{ template "get_nginx_proxyReadTimeoutSec" . }}; - include /etc/nginx/mime.types; default_type application/octet-stream; @@ -326,7 +270,6 @@ data: include /etc/nginx/conf.d/*.conf; } -{{- end }} {{- if .Values.prometheus.remoteWrite.tls.enabled }} --- apiVersion: v1 @@ -396,11 +339,7 @@ data: - 'container_cpu_usage_seconds_total{pod=~"(.*)yb-(.*)"}' - 'container_memory_working_set_bytes{pod=~"(.*)yb-(.*)"}' # kube-state-metrics - # Supports >= OCP v4.4 - # OCP v4.4 has upgraded the KSM from 1.8.0 to 1.9.5. 
- # https://docs.openshift.com/container-platform/4.4/release_notes/ocp-4-4-release-notes.html#ocp-4-4-cluster-monitoring-version-updates - # - 'kube_pod_container_resource_requests_cpu_cores{pod=~"(.*)yb-(.*)"}' - - 'kube_pod_container_resource_requests{pod=~"(.*)yb-(.*)", unit="core"}' + - 'kube_pod_container_resource_requests_cpu_cores{pod=~"(.*)yb-(.*)"}' static_configs: - targets: @@ -420,12 +359,6 @@ data: regex: "(.*)" target_label: "container_name" replacement: "$1" - # rename new name of the CPU metric to the old name and label - # ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16 - - source_labels: ["__name__", "unit"] - regex: "kube_pod_container_resource_requests;core" - target_label: "__name__" - replacement: "kube_pod_container_resource_requests_cpu_cores" {{- else }} @@ -476,8 +409,8 @@ data: - targets: ['kube-state-metrics.kube-system.svc.{{.Values.domainName}}:8080'] metric_relabel_configs: # Only keep the metrics which we care about - - source_labels: ["__name__", "unit"] - regex: "kube_pod_container_resource_requests;core" + - source_labels: ["__name__"] + regex: "kube_pod_container_resource_requests_cpu_cores" action: keep # Save the name of the metric so we can group_by since we cannot by __name__ directly... - source_labels: ["__name__"] @@ -496,16 +429,6 @@ data: - source_labels: ["pod_name"] regex: "(.*)yb-(.*)" action: keep - # rename new name of the CPU metric to the old name and label - # ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16 - - source_labels: ["__name__", "unit"] - regex: "kube_pod_container_resource_requests;core" - target_label: "__name__" - replacement: "kube_pod_container_resource_requests_cpu_cores" - # Keep metrics for CPU, discard duplicate metrics - - source_labels: ["__name__"] - regex: "kube_pod_container_resource_requests_cpu_cores" - action: keep - job_name: 'kubernetes-cadvisor' @@ -559,12 +482,6 @@ data: '{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9000' ] - - job_name: 'node-agent' - metrics_path: "/metrics" - file_sd_configs: - - files: - - '/opt/yugabyte/prometheus/targets/node-agent.*.json' - - job_name: "node" file_sd_configs: - files: @@ -650,8 +567,6 @@ data: replacement: "$1" - job_name: "yugabyte" - tls_config: - insecure_skip_verify: true metrics_path: "/prometheus-metrics" file_sd_configs: - files: diff --git a/charts/yugabyte/yugaware/templates/global-config.yaml b/charts/yugabyte/yugaware/templates/global-config.yaml index 4d7f54f45..925e1bbb7 100644 --- a/charts/yugabyte/yugaware/templates/global-config.yaml +++ b/charts/yugabyte/yugaware/templates/global-config.yaml @@ -16,8 +16,8 @@ data: postgres_user: {{ .Values.postgres.external.user | b64enc | quote }} postgres_password: {{ .Values.postgres.external.pass | b64enc | quote }} {{- else }} - postgres_db: {{ .Values.postgres.dbname | b64enc | quote }} - postgres_user: {{ .Values.postgres.user | b64enc | quote }} + postgres_db: {{ "yugaware" | b64enc | quote }} + postgres_user: {{ "postgres" | b64enc | quote }} postgres_password: {{ include "getOrGeneratePasswordConfigMapToSecret" (dict "Namespace" .Release.Namespace "Name" (printf "%s%s" .Release.Name "-yugaware-global-config") "Key" "postgres_password") | quote }} {{- end }} app_secret: {{ randAlphaNum 64 | b64enc | b64enc | quote }} diff --git a/charts/yugabyte/yugaware/templates/rbac.yaml b/charts/yugabyte/yugaware/templates/rbac.yaml index d05dfaeec..907f9e1ce 100644 --- 
a/charts/yugabyte/yugaware/templates/rbac.yaml +++ b/charts/yugabyte/yugaware/templates/rbac.yaml @@ -1,4 +1,3 @@ -{{ if not .Values.yugaware.serviceAccount }} apiVersion: v1 kind: ServiceAccount metadata: @@ -11,7 +10,6 @@ metadata: annotations: {{ toYaml .Values.yugaware.serviceAccountAnnotations | indent 4 }} {{- end }} -{{ end }} {{- if .Values.rbac.create }} {{- if .Values.ocpCompatibility.enabled }} --- @@ -23,7 +21,7 @@ metadata: app: yugaware subjects: - kind: ServiceAccount - name: {{ .Values.yugaware.serviceAccount | default .Release.Name }} + name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole @@ -31,31 +29,15 @@ roleRef: apiGroup: rbac.authorization.k8s.io {{- else }} --- -apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: {{ .Release.Name }} + labels: + k8s-app: yugaware + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile rules: -- apiGroups: ["policy"] - resources: - - poddisruptionbudgets - verbs: ["get", "create", "delete", "patch"] -- apiGroups: [""] - resources: - - services - verbs: ["get", "delete", "create", "patch", "list", "watch"] -- apiGroups: ["apps"] - resources: - - statefulsets - verbs: ["get", "delete", "create", "patch", "scale"] -- apiGroups: [""] - resources: - - secrets - verbs: ["create", "list", "get", "delete", "update", "patch"] -- apiGroups: ["cert-manager.io"] - resources: - - certificates - verbs: ["create", "delete", "get", "patch"] - apiGroups: [""] resources: - nodes @@ -64,8 +46,7 @@ rules: - endpoints - pods - pods/exec - - configmaps # added configmaps resource - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] # added all verbs for configmaps + verbs: ["get", "list", "watch", "create"] - apiGroups: - extensions resources: @@ -78,13 +59,13 @@ rules: - namespaces - secrets - pods/portforward - - events # added events resource - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] # added all verbs for events + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - apiGroups: ["", "extensions"] resources: - deployments - services verbs: ["create", "get", "list", "watch", "update", "delete"] + --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 @@ -96,7 +77,7 @@ metadata: addonmanager.kubernetes.io/mode: Reconcile subjects: - kind: ServiceAccount - name: {{ .Values.yugaware.serviceAccount | default .Release.Name }} + name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole diff --git a/charts/yugabyte/yugaware/templates/service.yaml b/charts/yugabyte/yugaware/templates/service.yaml index 8620cee08..8b93d84cb 100644 --- a/charts/yugabyte/yugaware/templates/service.yaml +++ b/charts/yugabyte/yugaware/templates/service.yaml @@ -24,24 +24,14 @@ spec: {{- end }} {{- end }} ports: -{{- if and (.Values.tls.enabled) (.Values.useNginxProxy) }} +{{- if .Values.tls.enabled }} - name: ui-tls port: 443 targetPort: 8443 -{{- else if .Values.tls.enabled }} - - name: ui-tls - port: 443 - targetPort: 9443 {{- end }} -{{- if .Values.useNginxProxy }} - name: ui port: 80 targetPort: 8080 -{{- else }} - - name: ui - port: 80 - targetPort: 9000 -{{- end }} - name: metrics port: 9090 selector: @@ -50,10 +40,6 @@ spec: {{- if and (eq .Values.yugaware.service.type "LoadBalancer") (.Values.yugaware.service.ip) }} loadBalancerIP: "{{ .Values.yugaware.service.ip }}" {{- end }} - {{- if .Values.yugaware.service.loadBalancerSourceRanges 
}} - loadBalancerSourceRanges: - {{- toYaml .Values.yugaware.service.loadBalancerSourceRanges | nindent 4 }} - {{- end }} {{- end }} {{- if .Values.yugaware.serviceMonitor.enabled }} --- diff --git a/charts/yugabyte/yugaware/templates/statefulset.yaml b/charts/yugabyte/yugaware/templates/statefulset.yaml index fbf914b1b..33a260ada 100644 --- a/charts/yugabyte/yugaware/templates/statefulset.yaml +++ b/charts/yugabyte/yugaware/templates/statefulset.yaml @@ -25,11 +25,8 @@ spec: {{- end }} labels: app: {{ .Release.Name }}-yugaware -{{- if .Values.yugaware.pod.labels }} -{{ toYaml .Values.yugaware.pod.labels | indent 8 }} -{{- end }} spec: - serviceAccountName: {{ .Values.yugaware.serviceAccount | default .Release.Name }} + serviceAccountName: {{ .Release.Name }} imagePullSecrets: - name: {{ .Values.image.pullSecret }} {{- if .Values.securityContext.enabled }} @@ -39,30 +36,6 @@ spec: fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} {{- end }} {{- end }} - {{- if .Values.nodeSelector }} - nodeSelector: -{{ toYaml .Values.nodeSelector | indent 8}} - {{- end }} - {{- if .Values.tolerations }} - tolerations: - {{- with .Values.tolerations }}{{ toYaml . | nindent 8 }}{{ end }} - {{- end }} - {{- if .Values.zoneAffinity }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: failure-domain.beta.kubernetes.io/zone - operator: In - values: -{{ toYaml .Values.zoneAffinity | indent 18 }} - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: -{{ toYaml .Values.zoneAffinity | indent 18 }} - {{- end }} volumes: - name: yugaware-storage persistentVolumeClaim: @@ -84,7 +57,6 @@ spec: - key: universe_boot_script path: universe-boot-script.sh {{- end }} - {{- if .Values.useNginxProxy }} - name: nginx-config configMap: name: {{ .Release.Name }}-yugaware-nginx-config @@ -97,7 +69,6 @@ spec: items: - key: nginx.conf path: nginx.conf - {{- end }} - name: prometheus-config configMap: name: {{ .Release.Name }}-yugaware-prometheus-config @@ -112,55 +83,25 @@ spec: - key: init-permissions.sh path: init-permissions.sh {{- end }} - {{- if and (.Values.tls.enabled) (.Values.useNginxProxy) }} + {{- if .Values.tls.enabled }} - name: {{ .Release.Name }}-yugaware-tls-cert secret: secretName: {{ .Release.Name }}-yugaware-tls-cert - {{- if .Values.tls.certManager.enabled }} - items: - - key: tls.crt - path: server.crt - - key: tls.key - path: server.key - {{- end }} - {{- end }} - {{- if and (not .Values.useNginxProxy) (.Values.tls.enabled) }} - - name: {{ .Release.Name }}-yugaware-tls-pem - secret: - secretName: {{ .Release.Name }}-yugaware-tls-pem - items: - - key: server.pem - path: server.pem {{- end }} {{- if .Values.prometheus.remoteWrite.tls.enabled }} - name: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls secret: secretName: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls {{- end }} - {{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }} - name: pg-upgrade-11-to-14 configMap: name: {{ .Release.Name }}-yugaware-pg-upgrade items: - key: pg-upgrade-11-to-14.sh path: pg-upgrade-11-to-14.sh - {{- end }} - - name: pg-init - configMap: - name: {{ .Release.Name }}-yugaware-pg-prerun - items: - - key: pg-prerun.sh - path: pg-prerun.sh - {{- if .Values.postgres.extraVolumes -}} - {{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}} - {{- .Values.postgres.extraVolumes | toYaml | nindent 8 -}} - {{ end }} 
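# Context for the lines removed just above (annotation, not chart content): the 2.18 side let
# extra volumes be attached for the postgres container, validated as matching pairs by the
# yugaware.isExtraVolumesMappingExists helper deleted earlier in this diff. The deleted
# values.yaml comments illustrate the expected shape:
#
#   postgres:
#     extraVolumes:
#       - name: custom-nfs-vol
#         persistentVolumeClaim:
#           claimName: some-nfs-claim
#     extraVolumeMounts:
#       - name: custom-nfs-vol
#         mountPath: /home/yugabyte/nfs-backup
#
# A volume without a same-named mount (or the reverse) makes the helper call `fail`, aborting
# the render.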
initContainers: - image: {{ include "full_yugaware_image" . }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.initContainers.prometheusConfiguration.resources }} - resources: {{- toYaml .Values.initContainers.prometheusConfiguration.resources | nindent 12 }} - {{ end -}} name: prometheus-configuration {{- if .Values.securityContext.enabled }} command: @@ -188,13 +129,9 @@ spec: - name: init-container-script mountPath: /init-container {{- end }} - {{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }} - image: {{ include "full_image" (dict "containerName" "postgres-upgrade" "root" .) }} imagePullPolicy: {{ .Values.image.pullPolicy }} name: postgres-upgrade - {{- if .Values.initContainers.postgresUpgrade.resources }} - resources: {{- toYaml .Values.initContainers.postgresUpgrade.resources | nindent 12 }} - {{ end -}} command: - 'bash' - '-c' @@ -224,46 +161,12 @@ spec: - name: yugaware-storage mountPath: /pg_upgrade_logs subPath: postgres_data_14 - {{- end }} - {{- if .Values.securityContext.enabled }} - - image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }} - name: postgres-init - {{- if .Values.initContainers.postgresInit.resources }} - resources: {{- toYaml .Values.initContainers.postgresInit.resources | nindent 12 }} - {{ end -}} - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: ["/bin/bash", "/pg_prerun/pg-prerun.sh"] - env: - - name: PGDATA - value: /var/lib/postgresql/data/pgdata - - name: PG_UID - value: {{ .Values.securityContext.runAsUser | quote }} - - name: PG_GID - value: {{ .Values.securityContext.runAsGroup | quote }} - volumeMounts: - - name: yugaware-storage - mountPath: /var/lib/postgresql/data - subPath: postgres_data_14 - - name: pg-init - mountPath: /pg_prerun - {{- end }} containers: {{ if not .Values.postgres.external.host }} - name: postgres image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }} imagePullPolicy: {{ .Values.image.pullPolicy }} - args: - {{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }} - - "run-postgresql" - {{- end }} - - "-c" - - "huge_pages=off" - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ required "runAsUser cannot be empty" .Values.securityContext.runAsUser }} - runAsGroup: {{ .Values.securityContext.runAsGroup | default 0 }} - runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} - {{- end }} + args: ["-c", "huge_pages=off"] env: - name: POSTGRES_USER valueFrom: @@ -280,37 +183,8 @@ spec: secretKeyRef: name: {{ .Release.Name }}-yugaware-global-config key: postgres_db - {{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }} - # Hardcoded the POSTGRESQL_USER because it's mandatory env var in RH PG image - # It doesn't have access to create the DB, so YBA fails to create the perf_advisor DB. 
- # Need to use admin user of RH PG image (postgres) - # Changing the user name won't be possible moving forward for OpenShift certified chart - - name: POSTGRESQL_USER - value: pg-yba - # valueFrom: - # secretKeyRef: - # name: {{ .Release.Name }}-yugaware-global-config - # key: postgres_user - - name: POSTGRESQL_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-yugaware-global-config - key: postgres_password - - name: POSTGRESQL_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-yugaware-global-config - key: postgres_password - - name: POSTGRESQL_DATABASE - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-yugaware-global-config - key: postgres_db - {{- else }} - # The RH Postgres image doesn't allow this directory to be changed. - name: PGDATA value: /var/lib/postgresql/data/pgdata - {{- end }} ports: - containerPort: 5432 name: postgres @@ -322,17 +196,8 @@ spec: volumeMounts: - name: yugaware-storage - {{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }} - mountPath: /var/lib/pgsql/data - subPath: postgres_data_13 - {{- else }} mountPath: /var/lib/postgresql/data subPath: postgres_data_14 - {{- end }} - {{- if .Values.postgres.extraVolumeMounts -}} - {{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}} - {{- .Values.postgres.extraVolumeMounts | toYaml | nindent 12 -}} - {{- end -}} {{ end }} - name: prometheus image: {{ include "full_image" (dict "containerName" "prometheus" "root" .) }} @@ -358,9 +223,6 @@ spec: subPath: prometheus.yml - name: yugaware-storage mountPath: /prometheus/ - - mountPath: /opt/yugabyte/yugaware/data/keys/ - name: yugaware-storage - subPath: data/keys {{- if .Values.prometheus.scrapeNodes }} - name: yugaware-storage mountPath: /opt/yugabyte/prometheus/targets @@ -382,9 +244,6 @@ spec: - --web.enable-admin-api - --web.enable-lifecycle - --storage.tsdb.retention.time={{ .Values.prometheus.retentionTime }} - - --query.max-concurrency={{ .Values.prometheus.queryConcurrency }} - - --query.max-samples={{ .Values.prometheus.queryMaxSamples }} - - --query.timeout={{ .Values.prometheus.queryTimeout }} ports: - containerPort: 9090 - name: yugaware @@ -401,18 +260,12 @@ spec: resources: {{ toYaml .Values.yugaware.resources | indent 12 }} {{- end }} - args: ["bin/yugaware","-Dconfig.file=/data/application.docker.conf"] + + command: [ "/sbin/tini", "--"] + args: + - "bin/yugaware" + - "-Dconfig.file=/data/application.docker.conf" env: - # Conditionally set these env variables, if runAsUser is not 0(root) - # or 10001(yugabyte). - {{- if eq (include "checkNssWrapperExportRequired" .) "true" }} - - name: NSS_WRAPPER_GROUP - value: "/tmp/group.template" - - name: NSS_WRAPPER_PASSWD - value: "/tmp/passwd.template" - - name: LD_PRELOAD - value: "/usr/lib64/libnss_wrapper.so" - {{- end }} - name: POSTGRES_USER valueFrom: secretKeyRef: @@ -433,7 +286,6 @@ spec: secretKeyRef: name: {{ .Release.Name }}-yugaware-global-config key: app_secret - {{- with .Values.yugaware.extraEnv }}{{ toYaml . 
| nindent 12 }}{{ end }} ports: - containerPort: 9000 name: yugaware @@ -450,9 +302,6 @@ spec: - name: yugaware-storage mountPath: /opt/yugabyte/releases/ subPath: releases - - name: yugaware-storage - mountPath: /opt/yugabyte/ybc/releases/ - subPath: ybc_releases # old path for backward compatibility - name: yugaware-storage mountPath: /opt/releases/ @@ -466,16 +315,9 @@ spec: - name: yugaware-storage mountPath: /prometheus_configs subPath: prometheus.yml - {{- if and (not .Values.useNginxProxy) (.Values.tls.enabled) }} - - name: {{ .Release.Name }}-yugaware-tls-pem - mountPath: /opt/certs/ - readOnly: true - {{- end }} - {{- if .Values.useNginxProxy }} - name: nginx image: {{ include "full_image" (dict "containerName" "nginx" "root" .) }} imagePullPolicy: {{ .Values.image.pullPolicy }} - args: ["nginx", "-g", "daemon off;"] ports: - containerPort: 8080 @@ -495,7 +337,6 @@ spec: mountPath: /opt/certs/ readOnly: true {{- end }} - {{- end }} {{ if .Values.sidecars }} {{ toYaml .Values.sidecars | indent 8 }} {{ end }} diff --git a/charts/yugabyte/yugaware/templates/tests/test.yaml b/charts/yugabyte/yugaware/templates/tests/test.yaml deleted file mode 100644 index 1c36fe948..000000000 --- a/charts/yugabyte/yugaware/templates/tests/test.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: {{ .Release.Name }}-yugaware-test - labels: - app: {{ .Release.Name }}-yugaware-test - chart: {{ template "yugaware.chart" . }} - release: {{ .Release.Name }} - annotations: - "helm.sh/hook": test -spec: - imagePullSecrets: - - name: {{ .Values.image.pullSecret }} - containers: - - name: yugaware-test - image: {{ include "full_yugaware_image" . }} - command: - - '/bin/bash' - - '-ec' - {{- if .Values.tls.enabled }} - - > - curl --head -k https://{{ .Release.Name }}-yugaware-ui - {{- else }} - - > - curl --head http://{{ .Release.Name }}-yugaware-ui - {{- end }} - # Hard coded resources to the test pod. 
- resources: - limits: - cpu: "1" - memory: "512Mi" - requests: - cpu: "0.5" - memory: "256Mi" - restartPolicy: Never diff --git a/charts/yugabyte/yugaware/tests/test_resources.yaml b/charts/yugabyte/yugaware/tests/test_resources.yaml deleted file mode 100644 index cc793a585..000000000 --- a/charts/yugabyte/yugaware/tests/test_resources.yaml +++ /dev/null @@ -1,40 +0,0 @@ -suite: Resources verification -templates: -- statefulset.yaml -- configs.yaml -tests: -- it: YBA container - template: statefulset.yaml - asserts: - - isNotEmpty: - path: spec.template.spec.containers[?(@.name == "yugaware")].resources.requests - -- it: Postgres container - template: statefulset.yaml - asserts: - - isNotEmpty: - path: spec.template.spec.containers[?(@.name == "postgres")].resources.requests - -- it: Prometheus container - template: statefulset.yaml - asserts: - - isNotEmpty: - path: spec.template.spec.containers[?(@.name == "prometheus")].resources.requests - -- it: Postgres-init initContainer - template: statefulset.yaml - asserts: - - isNotEmpty: - path: spec.template.spec.initContainers[?(@.name == "postgres-init")].resources.requests - -- it: Prometheus-configuration initContainer - template: statefulset.yaml - asserts: - - isNotEmpty: - path: spec.template.spec.initContainers[?(@.name == "prometheus-configuration")].resources.requests - -- it: Postgres-upgrade initContainer - template: statefulset.yaml - asserts: - - isNotEmpty: - path: spec.template.spec.initContainers[?(@.name == "postgres-upgrade")].resources.requests diff --git a/charts/yugabyte/yugaware/values.yaml b/charts/yugabyte/yugaware/values.yaml index 99fc380be..c838dc47f 100644 --- a/charts/yugabyte/yugaware/values.yaml +++ b/charts/yugabyte/yugaware/values.yaml @@ -2,26 +2,20 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. -fullnameOverride: "" -nameOverride: "" - -# Cloud team will retain nginx for sometime -# until they start creating a separate pool -useNginxProxy: false - image: commonRegistry: "" # Setting commonRegistry to say, quay.io overrides the registry settings for all images # including the yugaware image repository: quay.io/yugabyte/yugaware - tag: 2.18.2.1-b1 + tag: 2.14.12.0-b19 pullPolicy: IfNotPresent pullSecret: yugabyte-k8s-pull-secret ## Docker config JSON File name ## If set, this file content will be used to automatically create secret named as above - pullSecretFile: "" - + # pullSecretFile: + + postgres: registry: "" tag: '14.8' @@ -34,12 +28,12 @@ image: prometheus: registry: "" - tag: v2.44.0 + tag: v2.46.0 name: prom/prometheus nginx: registry: "" - tag: 1.25.0 + tag: 1.25.1 name: nginxinc/nginx-unprivileged yugaware: @@ -48,39 +42,30 @@ yugaware: storageClass: "" storageAnnotations: {} multiTenant: false - ## Name of existing ServiceAccount. When provided, the chart won't create a ServiceAccount. - ## It will attach the required RBAC roles to it. - ## Helpful in Yugabyte Platform GKE App. 
- serviceAccount: '' + serviceAccount: yugaware serviceMonitor: enabled: false annotations: {} serviceAccountAnnotations: {} service: annotations: {} - clusterIP: "" enabled: true ip: "" type: "LoadBalancer" - ## whitelist source CIDRs - #loadBalancerSourceRanges: - #- 0.0.0.0/0 - #- 192.168.100.0/24 pod: annotations: {} - labels: {} health: username: "" password: "" email: "" resources: requests: - cpu: "2" + cpu: 2 memory: 4Gi enableProxyMetricsAuth: true ## List of additional alowed CORS origins in case of complex rev-proxy additionAllowedCorsOrigins: [] - proxyEndpointTimeoutMs: 3 minute + proxyEndpointTimeoutMs: 1 minute ## Enables features specific for cloud deployments cloud: enabled: false @@ -91,16 +76,8 @@ yugaware: # Note that the default of 0 doesn't really make sense since a StatefulSet isn't allowed to schedule extra replicas. However it is maintained as the default while we do additional testing. This value will likely change in the future. maxUnavailable: 0 - universe_boot_script: "" - - extraEnv: [] - ## Configure PostgreSQL part of the application postgres: - # DO NOT CHANGE if using OCP Certified helm chart - user: postgres - dbname: yugaware - service: ## Expose internal Postgres as a Service enabled: false @@ -113,12 +90,12 @@ postgres: resources: requests: - cpu: "0.5" + cpu: 0.5 memory: 1Gi # If external.host is set then we will connect to an external postgres database server instead of starting our own. external: - host: "" + host: null port: 5432 pass: "" dbname: postgres @@ -127,65 +104,22 @@ postgres: ## JDBC connection parameters including the leading `?`. jdbcParams: "" - - ## Extra volumes - ## extraVolumesMounts are mandatory for each extraVolumes. - ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core - ## Example: - # extraVolumes: - # - name: custom-nfs-vol - # persistentVolumeClaim: - # claimName: some-nfs-claim - extraVolumes: [] - - ## Extra volume mounts - ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core - ## Example: - # extraVolumeMounts: - # - name: custom-nfs-vol - # mountPath: /home/yugabyte/nfs-backup - extraVolumeMounts: [] - tls: enabled: false hostname: "localhost" - ## Expects base 64 encoded values for certificate and key. 
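# How these fields are meant to be filled in (annotation; the hostname below is a placeholder):
# both sides of this diff expect base64-encoded PEM content, e.g.
#
#   tls:
#     enabled: true
#     hostname: "yw.example.com"
#     certificate: "<base64-encoded PEM certificate>"
#     key: "<base64-encoded PEM private key>"
#
# The long literals added below are the defaults bundled with the 2.14 line; the 2.18 line left
# both fields empty and fell back to generating a self-signed certificate at install time via
# the getOrCreateServerCert / getOrCreateServerPem helpers removed earlier in this diff.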
- certificate: "" - key: "" + certificate: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZDVENDQXZHZ0F3SUJBZ0lVTlhvN2N6T2dyUWQrU09wOWdNdE00b1Vva3hFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0piRzlqWVd4b2IzTjBNQjRYRFRJeE1EUXdOakExTXpnMU4xb1hEVE14TURRdwpOREExTXpnMU4xb3dGREVTTUJBR0ExVUVBd3dKYkc5allXeG9iM04wTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBZzhBTUlJQ0NnS0NBZ0VBMUxsSTFBLzRPOVIzSkNlN1N2MUxYVXhDSmxoTWpIWUoxV1FNVmcvai82RHkKazRTTmY0MkFLQjI0dFJFK2lEWTBNaTJrRWhJcVZ4TFdPN0hkWHVSN0tYNGxSZWFVVkRFTUtYUWNQUC9QWDZkbwpwZVZTUFpSVjVHNHNxTElXUFFkTVdIam9IQWx1aml5dGJsSVJUUWdLU3QrMmpuREFDN0dxRURMREdhNXRUWEM2CktRWkNtOERlaklOUTMzaGU2TDN0Q2hBRnhJM1pwY21sR0twbzdKVXJSUG14Mk9zTHFRcTB5dEVVK0lGZGppWHEKaHJLeFR0NUhHM3M3ZUNWaTRXdlZPelVGUitJbWRlQzBRZTBXeG5iZlZUMnJkVitQL1FaVXhWSEVtWnBPc0k2LwpmczhlK1dsMlduWXY1TTg5MWkxZER3Zi9lMDdiN20xQVRKdDRtTGRldzBtd1V4UGFGT2pDMDh6cU94NmF0cGhLClU1eHNWQmhGNVhyME9DeTQyMzN0MU5URXdWUEFDOFcwQmhHdldTRXBQTXNTKzM1b2lueEFrcFQzL01ibFpjNisKcXhSYUh6MHJhSksvVGIzelVKVWxWZFkxbGl5MVYyVjNxWEU2NWlsOUFHZ2pIaHhBNFBwSktCbzZ0WVRUT3pnTworL25mc0toMk95aE8zUWxBZ0JFUHlYUm5wL0xGSTVuQ2gzdjNiOXlabFNrSk05NkVoWEJ1bHhWUWN3L2p3N2NxCkRLSlBEeHFUQy9rWUs1V0FVZGhkWG1KQkRNMFBLcngzUGVOYjRsYnQzSTFIZW1QRDBoZktiWFd6alhiVTJQdWQKdjZmT0dXTDRLSFpaem9KZ1ljMFovRXRUMEpCR09GM09mMW42N2c5dDRlUnAzbEVSL09NM0FPY1dRbWFvOHlVQwpBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUI4R0ExVWRJd1FZCk1CYUFGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dJQkFBRmxrWVJkdzA0Zm9vT29BelUyaU5ORGV1aiszemhIeFQ5eU9iSkdwREZIRitoZQpuY1ZRWGZpMitHNjBWY0xuZERsWFhmbDZLOSs4ME55aEg4QjR1UEJNTWhoWG01MjJmYnJac1dFcnR3WE1rM2prClZ5UVA3MGk2NHE1ZGVrZzhoYzI0SXhFUlVsam9XM2lDTTdrb0VxaG15VkpGeDNxMVdobFEwdzNkWVpMQVNRclYKU0RpL2JGWjlqOXVtWVdoc0Y4QjFPSThPVjNlL0YyakU1UCtoTlJJazAzbW9zWE1Rdy9iZ3ZzV0hvSkZ5blB4UApHNGUzUjBob2NnbzI0Q2xOQ21YMWFBUms5c1pyN2h0NlVsM1F1d0dMdzZkK2I5emxrUW56TzFXQzc5ekVNU1R0ClRRRzFNT2ZlL2dTVkR3dThTSnpBOHV1Z0pYTktWWkxCZlpaNW41Tk9sOHdpOVVLa1BVUW4wOHo3VWNYVDR5ZnQKZHdrbnZnWDRvMFloUnNQNHpPWDF6eWxObzhqRDhRNlV1SkdQSksrN1JnUm8zVERPV3k4MEZpUzBxRmxrSFdMKwptT0pUWGxzaEpwdHE5b1c1eGx6N1lxTnFwZFVnRmNyTjJLQWNmaGVlNnV3SUFnOFJteTQvRlhRZjhKdXluSG5oClFhVlFnTEpEeHByZTZVNk5EdWg1Y1VsMUZTcWNCUGFPY0x0Q0ViVWg5ckQxajBIdkRnTUUvTTU2TGp1UGdGZlEKMS9xeXlDUkFjc2NCSnVMYjRxcXRUb25tZVZ3T1BBbzBsNXBjcC9JcjRTcTdwM0NML0kwT0o1SEhjcmY3d3JWSgpQVWgzdU1LbWVHVDRyeDdrWlQzQzBXenhUU0loc0lZOU12MVRtelF4MEprQm93c2NYaUYrcXkvUkl5UVgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + key: 
"LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFV1VWpVRC9nNzFIY2sKSjd0Sy9VdGRURUltV0V5TWRnblZaQXhXRCtQL29QS1RoSTEvallBb0hiaTFFVDZJTmpReUxhUVNFaXBYRXRZNwpzZDFlNUhzcGZpVkY1cFJVTVF3cGRCdzgvODlmcDJpbDVWSTlsRlhrYml5b3NoWTlCMHhZZU9nY0NXNk9MSzF1ClVoRk5DQXBLMzdhT2NNQUxzYW9RTXNNWnJtMU5jTG9wQmtLYndONk1nMURmZUY3b3ZlMEtFQVhFamRtbHlhVVkKcW1qc2xTdEUrYkhZNnd1cENyVEswUlQ0Z1YyT0plcUdzckZPM2tjYmV6dDRKV0xoYTlVN05RVkg0aVoxNExSQgo3UmJHZHQ5VlBhdDFYNC85QmxURlVjU1ptazZ3anI5K3p4NzVhWFphZGkva3p6M1dMVjBQQi85N1R0dnViVUJNCm0zaVl0MTdEU2JCVEU5b1U2TUxUek9vN0hwcTJtRXBUbkd4VUdFWGxldlE0TExqYmZlM1UxTVRCVThBTHhiUUcKRWE5WklTazh5eEw3Zm1pS2ZFQ1NsUGY4eHVWbHpyNnJGRm9mUFN0b2tyOU52Zk5RbFNWVjFqV1dMTFZYWlhlcApjVHJtS1gwQWFDTWVIRURnK2trb0dqcTFoTk03T0E3NytkK3dxSFk3S0U3ZENVQ0FFUS9KZEdlbjhzVWptY0tICmUvZHYzSm1WS1FrejNvU0ZjRzZYRlZCekQrUER0eW9Nb2s4UEdwTUwrUmdybFlCUjJGMWVZa0VNelE4cXZIYzkKNDF2aVZ1M2NqVWQ2WThQU0Y4cHRkYk9OZHRUWSs1Mi9wODRaWXZnb2Rsbk9nbUJoelJuOFMxUFFrRVk0WGM1LwpXZnJ1RDIzaDVHbmVVUkg4NHpjQTV4WkNacWp6SlFJREFRQUJBb0lDQUFmY2lScDlOSmxSY3MyOVFpaTFUN0cwCi9jVFpBb3MyV1lxdlZkMWdYUGEzaGY5NXFKa01LNjVQMnVHbUwzOXRNV1NoVnl6cnl2REkyMjM5VnNjSS9wdzcKOHppd0dzODV1TTlYWVN2SDhHd0NqZFdEc2hSZ2hRUWFKa0JkeElDZzRtdHFuSGxjeDk4dE80T1dPTmwxOEp0dgp4UmxpaFZacFRIV295cGtLWHpPN2RNWExXMjdTSStkaGV2Mm5QeXF1eWpIVEFjT1AwbmxVQ0d2dThFMjkvWWxoCkNQZVJTQzhKSEVGYWxNSFNWaGpJd2ZBVWJvVVJwZU1ZSE15RjVTK2JncGZiajhSbVVUR09DbHRkWGJnYjhJai8KN0hROEFlQkIrYVFKTDVEVnFRN1JWN1ppQlMwR2ZyODlHdXdEMUs4em9mcktPdURkdXpjR2hwZk9MeGpGdmhTOApSQ2Y1Z3BFMzg0aWlHc2tWZC9mZDJLK3NhSmk0L09HbHo0aHhhc1hDcTN1TXB5OTZPNFRrMXZzM3BXdWZNVmJXCnR2d1Mrcjhvbk9uOXZqa3lqOU11eUpId1BpSlNGMUt0ZzhPUU5WMlVST0xXcHlYMWk4Z2xoMXdSelRTQ2diQnMKZ3ZxWkFvaU1pWFh3SlVXN3Zpb0RLZjI0TnZvcjViaVNzeUh0MHVKUVZJaW1iK1prTFJwTWdwRlkyTlcrTnd6LwoxOW9DS2ZUVVpWNkJia09IK0NoOUowLy9hTTRGNnUvMTI4V0UxalJQU05mdWQ0b0dpdGVPNXRsRDNWSXRsb1hlCjNyWVMrcTNuYXU1RStWc2FRZGFVNzhrSnpXYmUrWURmQ1JwWGd6TkloSkMyQ1k5d0RSK3hIaVFwbzdLSHV6dngKUkpuRjhIcGwzdWhIdWxEam44dEpBb0lCQVFEeGxhVVIwN1l6TGF2OVZtamZCenpZMjcwOU9tWnhpa3NtRnlhWApKTkJMQVB3SGdXOEVCUHdKOEprSDhXR1NTekp1OXZGd1JDVEVqZ1J5dWUvS05DWnNmUWF2UDg3dzhablJHaEhjCklHUUV1MFN3bmJzZXFJK1VWa0M5amZjaFE4dlowM0dQTGZ6bWpsSW9PNkNLTVM3TlV2Ynk5MksvOHRVVWRtWWgKMmJJa2N4V0J1RDJoenh3K1ZId3ArWktMQ0FPZi9sOG8vQ20xQ1dZSFNGdVYzTkl3T016Z2FKaExJODJNR08zQwpuODZTMXcweGc2MHB5dUV6L0hXZS9JMFZkRGNsWlgyNC9jalVBb01kQlkvSGY4Tkh2ZUNhZExQeXI3eGpRY2NLClAzN0RhdFRyK2RTZ2RoVkxzUDRRRzVVZEZxNUlMSHoxTXBkb2xXZ2pDSlZqcTZMekFvSUJBUURoYXNYdVRzMDIKNEkvYkRlSGRZSmw2Q1NzVUh2NmJXL3dpYlRhd2dpbDh5RUNWS2x6eFY4eENwWnoxWVhRQlY1YnVvQlArbjZCWApnVHgzTTJHc2R5UU1xdGRCWG9qdGp1czB6ekFNQVQzOWNmdWlHMGR0YXF3eWJMVlEwYThDZnFmMDVyUmZ0ekVmCmtTUDk2d01kVUEyTGdCbnU4akwzOU41UkxtK2RpZUdxeDAwYmJTa3l5UE9HNHIvcDl6KzN6TmVmeUhmbm94bTkKUnQza1RpeGhVNkd4UGhOSnZpWEUrWUpwT0dKVXMvK2dUWWpjUE1zRW9ONHIyR215cUs3S21NZExFa3Y1SHliWgprbmNsV2FMVFlhNEpjMjJUaWZJd01NTWMwaCtBMkJVckdjZFZ6MTA0UXluUFZQZDdXcEszenhqcjRPUHh1YnQ2CjZvTWk2REdRSVNlSEFvSUJBUURTK1YyVHFQRDMxczNaU3VvQXc2Qld2ZWVRbmZ5eThSUFpxdVFQb0oycXNxeG0KblpsbXlEZVhNcDloK1dHOVVhQTBtY0dWeWx6VnJqU2lRRkR4cEFOZVFQMWlkSFh6b3ZveVN2TUg2dDJONkVELwpnRy9XUVZ4S0xkMFI3UFhCL2lQN0VaV2RkWXJqaWF5ajZCYTJPR2RuOWlrbFcvZklLM2Y4QzczN2w5TGoxQUVYCkxOL2QvREh0R1BqcDYwTVgyYUxZeVZzdlBxL3BvdENRVVpkeDA4dFhRM05nRXRmVTN1cDFpNXV2bU1IZEtLTWoKOTV0MDRQRTA1aWVOOVgzOEcyYkJhTldYaFVJcUxCdDJiOUgxWmxVU3hQWnR6TGNObkgwSHJYejJMU2MxMzRrYwpueXdhQ2FWbFdhYzJSL0E3Mi8vTmxkUjJpWDBDWDEvM0lGcmVGUmtUQW9JQkFBbGt0S2pRbWRhZWx3QU8zUW1uCm05MnRBaUdOaFJpZVJheDlscGpXWTdveWNoYUZOR2hPTzFIUHF2SEN4TjNGYzZHd0JBVkpTNW81NVhZbUt2elAKM2kyMDlORmhpaDAwSm5NRjZ6K2swWnQ5STNwRzNyd2RoTjE1RU
RrMDg3RUw3QjNWZTFDOXhvdEZOaFcvdEZxRgpXbnNrdEcvem9kSVpYeVpNNUJQUmloamV3MFRRVUxZd0Q0M2daeFR0MjdiaUQxNDJNV0R5dUFEZU1pTHdhd01IClJDYXBxbzRaSVdQSzdmZEtoVFo0WmIrZFc0V3A5dC9UZ0U2ZGJ4SWwyMXJQOFFZYzFoT2tpNjduWHBXczNZOG4KYytRcTdqY0d1WlB1aEVMd01xWGcyMGozZ3duOVlTb1dDbWo4Wm0rNmY0Q3ZYWjkrdUtEN0YyZncyOVFaanU4dApvb01DZ2dFQkFPbVVHZ1VoT0tUVys1eEpkZlFKRUVXUncyVFF6Z2l6dSt3aVkzaDYrYXNTejRNY0srVGx6bWxVCmFHT013dFhTUzc0RXIxVmlCVXMrZnJKekFPR21IV0ExZWdtaGVlY1BvaE9ybTh5WkVueVJOSkRhWC9UUXBSUnEKaVdoWENBbjJTWFQxcFlsYVBzMjdkbXpFWnQ3UlVUSkJZZ1hHZXQ4dXFjUXZaVDJZK3N6cHFNV3UzaEpWdmIxdgpZNGRJWE12RG1aV1BPVjFwbHJEaTVoc214VW05TDVtWk1IblllNzFOYkhsaEIxK0VUNXZmWFZjOERzU1RRZWRRCitDRHJKNGQ0em85dFNCa2pwYTM5M2RDRjhCSURESUQyWkVJNCtBVW52NWhTNm82NitOLzBONlp3cXkwc2pKY0cKQ21LeS9tNUpqVzFJWDMxSmZ1UU5Ldm9YNkRFN0Zkaz0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=" sslProtocols: "" # if set, override default Nginx SSL protocols setting - ## cert-manager values - ## If cert-manager is enabled: - ## If genSelfsigned: true: - ## Create a self-signed issuer/clusterIssuer - ## Generate a rootCA using the above issuer. - ## Generate a tls certificate with secret name as: {{ .Release.Name }}-yugaware-tls-cert - ## Else if genSelfsigned: false: - ## Expect a clusterIssuer/issuer to be provided by user - ## Generate a tls cert based on above issuer with secret name as: {{ .Release.Name }}-yugaware-tls-cert - certManager: - enabled: false - genSelfsigned: true - useClusterIssuer: false - clusterIssuer: cluster-ca - issuer: yugaware-ca - ## Configuration for the TLS certificate requested from Issuer/ClusterIssuer - configuration: - duration: 8760h # 90d - renewBefore: 240h # 15d - algorithm: RSA # ECDSA or RSA - # Can be 2048, 4096 or 8192 for RSA - # Or 256, 384 or 521 for ECDSA - keySize: 2048 ## yugaware pod Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ securityContext: - enabled: true + enabled: false ## fsGroup related values are set at the pod level. fsGroup: 10001 fsGroupChangePolicy: "OnRootMismatch" - ## Expected to have runAsUser values != 0 when - ## runAsNonRoot is set to true, otherwise container creation fails. + ## The following values are set for yugaware and prometheus containers. + ## Setting runAsUser other than 10001 will fail the VM universe deployment flow. runAsUser: 10001 runAsGroup: 10001 runAsNonRoot: true @@ -209,11 +143,9 @@ nginx: resources: requests: - cpu: "0.25" + cpu: 0.25 memory: 300Mi - proxyReadTimeoutSec: 600 - rbac: ## Set this to false if you don't have enough permissions to create ## ClusterRole and Binding, for example an OpenShift cluster. When @@ -229,45 +161,15 @@ ocpCompatibility: # Extra containers to add to the pod. sidecars: [] -## Following two controls for placement of pod - nodeSelector and AZ affinity. -## Note: Remember to also provide a yugaware.StorageClass that has a olumeBindingMode of -## WaitForFirstConsumer so that the PVC is created in the right topology visible to this pod. -## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector -## eg. -## nodeSelector: -## topology.kubernetes.io/region: us-west1 -nodeSelector: {} - -## Affinity to a particular zone for the pod. -## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity -## eg. 
-## nodeAffinity: -## requiredDuringSchedulingIgnoredDuringExecution: -## nodeSelectorTerms: -## - matchExpressions: -## - key: failure-domain.beta.kubernetes.io/zone -## operator: In -## values: -## - us-west1-a -## - us-west1-b -zoneAffinity: {} - -## The tolerations that the pod should have. -## See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ -tolerations: [] - ## Don't want prometheus to scrape nodes and evaluate alert rules in some cases (for example - cloud). prometheus: scrapeNodes: true evaluateAlertRules: true retentionTime: 15d - queryConcurrency: 20 - queryMaxSamples: 5000000 - queryTimeout: 30s resources: requests: - cpu: "2" + cpu: 2 memory: 4Gi ## Prometheus remote write config, as described here: @@ -288,10 +190,8 @@ prometheus: # Arbitrary key=value config entries for application.docker.conf additionalAppConf: - stringConf: {} - nonStringConf: {} - -jdbcParams: "" + stringConf: + nonStringConf: ## Override the APIVersion used by policy group for ## PodDisruptionBudget resources. The chart selects the correct @@ -299,25 +199,3 @@ jdbcParams: "" ## to modify this unless you are using helm template command i.e. GKE ## app's deployer image against a Kubernetes cluster >= 1.21. # pdbPolicyVersionOverride: "v1beta1" -pdbPolicyVersionOverride: "" - -initContainers: - prometheusConfiguration: - resources: - ## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container - ## Use the above link to learn more about Kubernetes resources configuration. - requests: - cpu: "0.25" - memory: 500Mi - - postgresUpgrade: - resources: - requests: - cpu: "0.5" - memory: 500Mi - - postgresInit: - resources: - requests: - cpu: "0.25" - memory: 500Mi diff --git a/index.yaml b/index.yaml index 734892453..efeeaf92e 100644 --- a/index.yaml +++ b/index.yaml @@ -21803,6 +21803,37 @@ entries: - assets/haproxy/haproxy-1.4.300.tgz version: 1.4.300 harbor: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Harbor + catalog.cattle.io/kube-version: '>=1.20-0' + catalog.cattle.io/release-name: harbor + apiVersion: v1 + appVersion: 2.9.0 + created: "2023-08-30T19:39:38.008119843Z" + description: An open source trusted cloud native registry that stores, signs, + and scans content + digest: 67e02a766502d6227e91fcf93b046df7ab63e9ba3e52da3f4c0791e621d91721 + home: https://goharbor.io + icon: https://raw.githubusercontent.com/goharbor/website/main/static/img/logos/harbor-icon-color.png + keywords: + - docker + - registry + - harbor + maintainers: + - email: yinw@vmware.com + name: Wenkai Yin + - email: hweiwei@vmware.com + name: Weiwei He + - email: yshengwen@vmware.com + name: Shengwen Yu + name: harbor + sources: + - https://github.com/goharbor/harbor + - https://github.com/goharbor/harbor-helm + urls: + - assets/harbor/harbor-1.13.0.tgz + version: 1.13.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Harbor @@ -26652,6 +26683,58 @@ entries: - assets/kasten/k10-4.5.900.tgz version: 4.5.900 kafka: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Kafka + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: kafka + category: Infrastructure + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.19.0-debian-11-r57 + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.7.0-debian-11-r93 + - name: kafka + image: 
docker.io/bitnami/kafka:3.5.1-debian-11-r35 + - name: kubectl + image: docker.io/bitnami/kubectl:1.25.13-debian-11-r5 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r51 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.5.1 + created: "2023-08-30T19:39:34.710902238Z" + dependencies: + - condition: zookeeper.enabled + name: zookeeper + repository: file://./charts/zookeeper + version: 12.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Kafka is a distributed streaming platform designed to build + real-time pipelines and can be used as a message broker or as a replacement + for a log aggregation solution for big data applications. + digest: 9f83c79b99ea486b2e032bd9dc3e3f8888d9aeaf98c827f346a980c49a2f7329 + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/kafka.svg + keywords: + - kafka + - zookeeper + - streaming + - producer + - consumer + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: kafka + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/kafka + urls: + - assets/bitnami/kafka-25.1.4.tgz + version: 25.1.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Kafka @@ -29702,6 +29785,46 @@ entries: - assets/kong/kong-2.3.1.tgz version: 2.3.1 koor-operator: + - annotations: + artifacthub.io/category: storage + artifacthub.io/crds: | + - kind: KoorCluster + version: v1 + name: koorcluster + displayName: Koor Cluster + description: "Resource to control the creation of a Koor Storage Cluster." + artifacthub.io/license: Apache-2.0 + artifacthub.io/operator: "true" + artifacthub.io/operatorCapabilities: basic install + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Koor Operator + catalog.cattle.io/kube-version: '>=1.19.0' + catalog.cattle.io/release-name: koor-operator + apiVersion: v2 + appVersion: v0.3.6 + created: "2023-08-30T19:39:39.906677918Z" + dependencies: + - alias: certmanager + condition: certmanager.enabled + name: cert-manager + repository: file://./charts/cert-manager + version: v1.12.3 + description: A Helm chart for deploying the Koor Operator to Kubernetes + digest: 13a2870a55b293b57a332ffbc327782f711fa18b9acad23e4bb85475499e489b + icon: https://koor.tech/images/favicon.svg + keywords: + - storage + - operator + - rook + - ceph + kubeVersion: '>=1.19.0' + name: koor-operator + sources: + - https://github.com/koor-tech/koor-operator/ + type: application + urls: + - assets/koor-tech/koor-operator-0.3.6.tgz + version: 0.3.6 - annotations: artifacthub.io/category: storage artifacthub.io/crds: | @@ -30546,6 +30669,33 @@ entries: - assets/avesha/kubeslice-worker-0.4.5.tgz version: 0.4.5 kuma: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Kuma + catalog.cattle.io/namespace: kuma-system + catalog.cattle.io/release-name: kuma + apiVersion: v2 + appVersion: 2.4.0 + created: "2023-08-30T19:39:40.311031705Z" + description: A Helm chart for the Kuma Control Plane + digest: db80b48cbcdecda71b966fcaf25dee4cc83564d348816edd47d906320a2242b0 + home: https://github.com/kumahq/kuma + icon: https://kuma.io/assets/images/brand/kuma-logo-new.svg + keywords: + - service mesh + - control plane + maintainers: + - email: austin.cawley@gmail.com + name: austince + - email: jakub.dyszkiewicz@konghq.com + name: jakubdyszkiewicz + - email: nikolay.nikolaev@konghq.com + name: nickolaev + name: kuma + 
type: application + urls: + - assets/kuma/kuma-2.4.0.tgz + version: 2.4.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Kuma @@ -38788,6 +38938,25 @@ entries: - assets/pixie/pixie-operator-chart-0.0.2501.tgz version: 0.0.2501 polaris: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Fairwinds Polaris + catalog.cattle.io/kube-version: '>= 1.22.0-0' + catalog.cattle.io/release-name: polaris + apiVersion: v1 + appVersion: "8.5" + created: "2023-08-30T19:39:37.706711558Z" + description: Validation of best practices in your Kubernetes clusters + digest: ef7150827cfaae76e5d19be56306478936a645a6a45d4b45a55fe3d87e603091 + icon: https://polaris.docs.fairwinds.com/img/polaris-logo.png + kubeVersion: '>= 1.22.0-0' + maintainers: + - email: robertb@fairwinds.com + name: rbren + name: polaris + urls: + - assets/fairwinds/polaris-5.14.0.tgz + version: 5.14.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Fairwinds Polaris @@ -43520,6 +43689,50 @@ entries: - assets/bitnami/redis-17.3.7.tgz version: 17.3.7 redpanda: + - annotations: + artifacthub.io/images: | + - name: redpanda + image: docker.redpanda.com/redpandadata/redpanda:v23.2.7 + - name: busybox + image: busybox:latest + - name: mintel/docker-alpine-bash-curl-jq + image: mintel/docker-alpine-bash-curl-jq:latest + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Documentation + url: https://docs.redpanda.com + - name: "Helm (>= 3.6.0)" + url: https://helm.sh/docs/intro/install/ + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redpanda + catalog.cattle.io/kube-version: '>=1.21-0' + catalog.cattle.io/release-name: redpanda + apiVersion: v2 + appVersion: v23.2.7 + created: "2023-08-30T19:39:41.582229198Z" + dependencies: + - condition: console.enabled + name: console + repository: file://./charts/console + version: '>=0.5 <1.0' + - condition: connectors.enabled + name: connectors + repository: file://./charts/connectors + version: '>=0.1.2 <1.0' + description: Redpanda is the real-time engine for modern apps. 
+ digest: 926108e48e7e66a9e8ffa151d15c973bc32a3b19ce5daa47f19a7262fca88570 + icon: https://images.ctfassets.net/paqvtpyf8rwu/3cYHw5UzhXCbKuR24GDFGO/73fb682e6157d11c10d5b2b5da1d5af0/skate-stand-panda.svg + kubeVersion: '>=1.21-0' + maintainers: + - name: redpanda-data + url: https://github.com/orgs/redpanda-data/people + name: redpanda + sources: + - https://github.com/redpanda-data/helm-charts + type: application + urls: + - assets/redpanda/redpanda-5.2.0.tgz + version: 5.2.0 - annotations: artifacthub.io/images: | - name: redpanda @@ -49893,6 +50106,32 @@ entries: - assets/sumologic/sumologic-2.17.0.tgz version: 2.17.0 sysdig: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Sysdig + catalog.cattle.io/release-name: sysdig + apiVersion: v1 + appVersion: 12.16.0 + created: "2023-08-30T19:39:41.984082553Z" + deprecated: true + description: Sysdig Monitor and Secure agent + digest: 11edb74fc942bc09757854bd2f42d15caf3f0e55b95cd9992d37898cd8fdb457 + home: https://www.sysdig.com/ + icon: https://avatars.githubusercontent.com/u/5068817?s=200&v=4 + keywords: + - monitoring + - security + - alerting + - metric + - troubleshooting + - run-time + name: sysdig + sources: + - https://app.sysdigcloud.com/#/settings/user + - https://github.com/draios/sysdig + urls: + - assets/sysdig/sysdig-1.16.9.tgz + version: 1.16.9 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Sysdig @@ -54514,6 +54753,60 @@ entries: - assets/hashicorp/vault-0.22.0.tgz version: 0.22.0 wordpress: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: WordPress + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: wordpress + category: CMS + images: | + - name: apache-exporter + image: docker.io/bitnami/apache-exporter:1.0.1-debian-11-r29 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r51 + - name: wordpress + image: docker.io/bitnami/wordpress:6.3.1-debian-11-r0 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 6.3.1 + created: "2023-08-30T19:39:36.48688784Z" + dependencies: + - condition: memcached.enabled + name: memcached + repository: file://./charts/memcached + version: 6.x.x + - condition: mariadb.enabled + name: mariadb + repository: file://./charts/mariadb + version: 13.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: WordPress is the world's most popular blogging and content management + platform. Powerful yet simple, everyone from students to global corporations + use it to build beautiful, functional websites. + digest: 1d1cedd3729010a13c8b1efff88075536fb2ade3fd95cd8e3c54c8f6a88dbcfc + home: https://bitnami.com + icon: https://s.w.org/style/images/about/WordPress-logotype-simplified.png + keywords: + - application + - blog + - cms + - http + - php + - web + - wordpress + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: wordpress + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/wordpress + urls: + - assets/bitnami/wordpress-17.1.4.tgz + version: 17.1.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: WordPress @@ -58225,6 +58518,30 @@ entries: urls: - assets/yugabyte/yugabyte-2.16.0.tgz version: 2.16.0 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: YugabyteDB + catalog.cattle.io/kube-version: '>=1.18-0' + catalog.cattle.io/release-name: yugabyte + apiVersion: v1 + appVersion: 2.14.12.0-b19 + created: "2023-08-30T19:39:42.403878335Z" + description: YugabyteDB is the high-performance distributed SQL database for building + global, internet-scale apps. + digest: 6fd9e82d92922591903b6de88b3ec130251720f3db162a60b3b6458c71288476 + home: https://www.yugabyte.com + icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4 + maintainers: + - email: ram@yugabyte.com + name: Ram Sri + - email: arnav@yugabyte.com + name: Arnav Agarwal + name: yugabyte + sources: + - https://github.com/yugabyte/yugabyte-db + urls: + - assets/yugabyte/yugabyte-2.14.12.tgz + version: 2.14.12 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: YugabyteDB @@ -58693,6 +59010,27 @@ entries: urls: - assets/yugabyte/yugaware-2.16.0.tgz version: 2.16.0 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: YugabyteDB Anywhere + catalog.cattle.io/kube-version: '>=1.18-0' + catalog.cattle.io/release-name: yugaware + apiVersion: v1 + appVersion: 2.14.12.0-b19 + created: "2023-08-30T19:39:42.445707961Z" + description: YugaWare is YugaByte Database's Orchestration and Management console. + digest: 6b685d9fde8cdc5e0236e0a4e1cebd09e8d505a8d616af90ce5fa8d075a65cf8 + home: https://www.yugabyte.com + icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4 + maintainers: + - email: ram@yugabyte.com + name: Ram Sri + - email: arnav@yugabyte.com + name: Arnav Agarwal + name: yugaware + urls: + - assets/yugabyte/yugaware-2.14.12.tgz + version: 2.14.12 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: YugabyteDB Anywhere