Charts CI

```
Updated:
  clastix/kamaji:
    - 0.15.2
  dynatrace/dynatrace-operator:
    - 1.0.1
  external-secrets/external-secrets:
    - 0.9.16
  jenkins/jenkins:
    - 5.1.6
  kubecost/cost-analyzer:
    - 2.2.2
  kuma/kuma:
    - 2.7.0
  loft/loft:
    - 3.4.5
  percona/pxc-db:
    - 1.14.3
  redpanda/redpanda:
    - 5.7.41
  speedscale/speedscale-operator:
    - 2.1.19
  yugabyte/yugabyte:
    - 2.14.16
  yugabyte/yugaware:
    - 2.14.16
```
pull/1012/head
github-actions[bot] 2024-04-18 19:06:04 +00:00
parent 686affaecf
commit 300c366766
101 changed files with 5390 additions and 6173 deletions

(Updated binary chart archives under assets/ are not shown; the new files include assets/kuma/kuma-2.7.0.tgz and assets/loft/loft-3.4.5.tgz.)

View File

@ -4,20 +4,22 @@ annotations:
catalog.cattle.io/kube-version: '>=1.21.0-0'
catalog.cattle.io/release-name: kamaji
apiVersion: v2
appVersion: v0.4.2
description: Kamaji is a Kubernetes Control Plane Manager.
appVersion: v0.5.0
description: Kamaji is the Hosted Control Plane Manager for Kubernetes.
home: https://github.com/clastix/kamaji
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
kubeVersion: '>=1.21.0-0'
maintainers:
- email: dario@tranchitella.eu
name: Dario Tranchitella
url: https://clastix.io
- email: me@maxgio.it
name: Massimiliano Giovagnoli
- email: me@bsctl.io
name: Adriano Pezzuto
url: https://clastix.io
name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 0.15.1
version: 0.15.2

View File

@ -1,16 +1,16 @@
# kamaji
![Version: 0.15.1](https://img.shields.io/badge/Version-0.15.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.4.2](https://img.shields.io/badge/AppVersion-v0.4.2-informational?style=flat-square)
![Version: 0.15.2](https://img.shields.io/badge/Version-0.15.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.5.0](https://img.shields.io/badge/AppVersion-v0.5.0-informational?style=flat-square)
Kamaji is a Kubernetes Control Plane Manager.
Kamaji is the Hosted Control Plane Manager for Kubernetes.
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Dario Tranchitella | <dario@tranchitella.eu> | |
| Dario Tranchitella | <dario@tranchitella.eu> | <https://clastix.io> |
| Massimiliano Giovagnoli | <me@maxgio.it> | |
| Adriano Pezzuto | <me@bsctl.io> | |
| Adriano Pezzuto | <me@bsctl.io> | <https://clastix.io> |
## Source Code

View File

@ -30,10 +30,19 @@ spec:
description: DataStore is the Schema for the datastores API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@ -41,18 +50,24 @@ spec:
description: DataStoreSpec defines the desired state of DataStore.
properties:
basicAuth:
description: In case of authentication enabled for the given data store, specifies the username and password pair. This value is optional.
description: |-
In case of authentication enabled for the given data store, specifies the username and password pair.
This value is optional.
properties:
password:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
description: |-
Bare content of the file, base64 encoded.
It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
description: |-
Name of the key for the given Secret reference where the content is stored.
This value is mandatory.
minLength: 1
type: string
name:
@ -69,13 +84,17 @@ spec:
username:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
description: |-
Bare content of the file, base64 encoded.
It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
description: |-
Name of the key for the given Secret reference where the content is stored.
This value is mandatory.
minLength: 1
type: string
name:
@ -101,7 +120,9 @@ spec:
- PostgreSQL
type: string
endpoints:
description: List of the endpoints to connect to the shared datastore. No need for protocol, just bare IP/FQDN and port.
description: |-
List of the endpoints to connect to the shared datastore.
No need for protocol, just bare IP/FQDN and port.
items:
type: string
minItems: 1
@ -110,18 +131,24 @@ spec:
description: Defines the TLS/SSL configuration required to connect to the data store in a secure way.
properties:
certificateAuthority:
description: Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference. The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
description: |-
Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference.
The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
properties:
certificate:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
description: |-
Bare content of the file, base64 encoded.
It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
description: |-
Name of the key for the given Secret reference where the content is stored.
This value is mandatory.
minLength: 1
type: string
name:
@ -138,13 +165,17 @@ spec:
privateKey:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
description: |-
Bare content of the file, base64 encoded.
It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
description: |-
Name of the key for the given Secret reference where the content is stored.
This value is mandatory.
minLength: 1
type: string
name:
@ -167,13 +198,17 @@ spec:
certificate:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
description: |-
Bare content of the file, base64 encoded.
It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
description: |-
Name of the key for the given Secret reference where the content is stored.
This value is mandatory.
minLength: 1
type: string
name:
@ -190,13 +225,17 @@ spec:
privateKey:
properties:
content:
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
description: |-
Bare content of the file, base64 encoded.
It has precedence over the SecretReference value.
format: byte
type: string
secretReference:
properties:
keyPath:
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
description: |-
Name of the key for the given Secret reference where the content is stored.
This value is mandatory.
minLength: 1
type: string
name:

(One file diff suppressed because it is too large.)

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.19.0-0'
catalog.cattle.io/release-name: dynatrace-operator
apiVersion: v2
appVersion: 1.0.0
appVersion: 1.0.1
description: The Dynatrace Operator Helm chart for Kubernetes and OpenShift
home: https://www.dynatrace.com/
icon: https://assets.dynatrace.com/global/resources/Signet_Logo_RGB_CP_512x512px.png
@ -20,4 +20,4 @@ name: dynatrace-operator
sources:
- https://github.com/Dynatrace/dynatrace-operator
type: application
version: 1.0.0
version: 1.0.1

View File

@ -41,7 +41,7 @@ spec:
name: v1alpha1
schema:
openAPIV3Schema:
description: DynaKube is the Schema for the DynaKube API
description: DynaKube is the Schema for the DynaKube API.
properties:
apiVersion:
description: |-
@ -1084,7 +1084,7 @@ spec:
name: v1beta1
schema:
openAPIV3Schema:
description: DynaKube is the Schema for the DynaKube API
description: DynaKube is the Schema for the DynaKube API.
properties:
apiVersion:
description: |-

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>= 1.19.0-0'
catalog.cattle.io/release-name: external-secrets
apiVersion: v2
appVersion: v0.9.14
appVersion: v0.9.16
description: External secret management for Kubernetes
home: https://github.com/external-secrets/external-secrets
icon: https://raw.githubusercontent.com/external-secrets/external-secrets/main/assets/eso-logo-large.png
@ -17,4 +17,4 @@ maintainers:
name: mcavoyk
name: external-secrets
type: application
version: 0.9.14
version: 0.9.16

View File

@ -4,7 +4,7 @@
[//]: # (README.md generated by gotmpl. DO NOT EDIT.)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.14](https://img.shields.io/badge/Version-0.9.14-informational?style=flat-square)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.16](https://img.shields.io/badge/Version-0.9.16-informational?style=flat-square)
External secret management for Kubernetes
@ -139,6 +139,8 @@ The command removes all the Kubernetes components associated with the chart and
| securityContext.runAsNonRoot | bool | `true` | |
| securityContext.runAsUser | int | `1000` | |
| securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
| service.ipFamilies | list | `[]` | Sets the families that should be supported and the order in which they should be applied to ClusterIP as well. Can be IPv4 and/or IPv6. |
| service.ipFamilyPolicy | string | `""` | Set the ip family policy to configure dual-stack see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services) |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
| serviceAccount.automount | bool | `true` | Automounts the service account token in all containers of the pod |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |

View File

@ -12,6 +12,12 @@ metadata:
{{- end }}
spec:
type: ClusterIP
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ .Values.service.ipFamilies | toYaml | nindent 2 }}
{{- end }}
ports:
- port: {{ .Values.certController.metrics.service.port }}
protocol: TCP

View File

@ -498,7 +498,9 @@ spec:
type: object
type: object
namespaceSelector:
description: The labels to select by to find the Namespaces to create the ExternalSecrets in.
description: |-
The labels to select by to find the Namespaces to create the ExternalSecrets in.
Deprecated: Use NamespaceSelectors instead.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
@ -539,6 +541,54 @@ spec:
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelectors:
description: A list of labels to select by to find the Namespaces to create the ExternalSecrets in. The selectors are ORed.
items:
description: |-
A label selector is a label query over a set of resources. The result of matchLabels and
matchExpressions are ANDed. An empty label selector matches all objects. A null
label selector matches no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: array
namespaces:
description: Choose namespaces by name. This field is ORed with anything that NamespaceSelector ends up choosing.
items:

View File

@ -2012,10 +2012,10 @@ spec:
description: AzureKV configures this store to sync secrets using Azure Key Vault provider
properties:
authSecretRef:
description: Auth configures how the operator authenticates with Azure. Required for ServicePrincipal auth type.
description: Auth configures how the operator authenticates with Azure. Required for ServicePrincipal auth type. Optional for WorkloadIdentity.
properties:
clientId:
description: The Azure clientId of the service principle used for authentication.
description: The Azure clientId of the service principle or managed identity used for authentication.
properties:
key:
description: |-
@ -2048,6 +2048,23 @@ spec:
to the namespace of the referent.
type: string
type: object
tenantId:
description: The Azure tenantId of the managed identity used for authentication.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
type: object
authType:
default: ServicePrincipal
@ -2102,7 +2119,7 @@ spec:
- name
type: object
tenantId:
description: TenantID configures the Azure Tenant to send requests to. Required for ServicePrincipal auth type.
description: TenantID configures the Azure Tenant to send requests to. Required for ServicePrincipal auth type. Optional for WorkloadIdentity.
type: string
vaultUrl:
description: Vault Url from which the secrets to be fetched from.
@ -3040,6 +3057,60 @@ spec:
- region
- vault
type: object
passbolt:
properties:
auth:
description: Auth defines the information necessary to authenticate against Passbolt Server
properties:
passwordSecretRef:
description: |-
A reference to a specific 'key' within a Secret resource,
In some instances, `key` is a required field.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
privateKeySecretRef:
description: |-
A reference to a specific 'key' within a Secret resource,
In some instances, `key` is a required field.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
required:
- passwordSecretRef
- privateKeySecretRef
type: object
host:
description: Host defines the Passbolt Server to connect to
type: string
required:
- auth
- host
type: object
passworddepot:
description: Configures a store to sync secrets with a Password Depot instance.
properties:
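
For orientation, the new `passbolt` provider added to this store schema needs a `host` plus `auth.passwordSecretRef` and `auth.privateKeySecretRef`. A minimal sketch of that provider fragment only, using just the fields shown in the CRD above; the Secret names and server URL are placeholders, not values from the chart:

```
passbolt:
  host: https://passbolt.example.com     # placeholder Passbolt server URL
  auth:
    passwordSecretRef:
      name: passbolt-credentials         # example Secret name (not from the chart)
      key: password
    privateKeySecretRef:
      name: passbolt-credentials
      key: private-key
```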

View File

@ -0,0 +1,110 @@
{{- if .Values.installCRDs }}
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
{{- with .Values.crds.annotations }}
{{- toYaml . | nindent 4}}
{{- end }}
{{- if and .Values.crds.conversion.enabled .Values.webhook.certManager.enabled .Values.webhook.certManager.addInjectorAnnotations }}
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "external-secrets.fullname" . }}-webhook
{{- end }}
controller-gen.kubebuilder.io/version: v0.14.0
name: githubaccesstokens.generators.external-secrets.io
spec:
group: generators.external-secrets.io
names:
categories:
- githubaccesstoken
kind: GithubAccessToken
listKind: GithubAccessTokenList
plural: githubaccesstokens
shortNames:
- githubaccesstoken
singular: githubaccesstoken
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: GithubAccessToken generates ghs_ accessToken
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
properties:
appID:
type: string
auth:
description: Auth configures how ESO authenticates with a Github instance.
properties:
privatKey:
properties:
secretRef:
description: |-
A reference to a specific 'key' within a Secret resource,
In some instances, `key` is a required field.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
required:
- secretRef
type: object
required:
- privatKey
type: object
installID:
type: string
url:
description: URL configures the Github instance URL. Defaults to https://github.com/.
type: string
required:
- appID
- auth
- installID
type: object
type: object
served: true
storage: true
subresources:
status: {}
{{- if .Values.crds.conversion.enabled }}
conversion:
strategy: Webhook
webhook:
conversionReviewVersions:
- v1
clientConfig:
service:
name: {{ include "external-secrets.fullname" . }}-webhook
namespace: {{ .Release.Namespace | quote }}
path: /convert
{{- end }}
{{- end }}
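
The generator template above defines the full `GithubAccessToken` schema: required `appID`, `installID`, and `auth.privatKey.secretRef` (the field is spelled `privatKey` in the upstream schema), plus an optional `url`. A minimal sketch of a manifest that fits this schema; all names and IDs are illustrative:

```
apiVersion: generators.external-secrets.io/v1alpha1
kind: GithubAccessToken
metadata:
  name: github-token                   # example name
spec:
  appID: "123456"                      # illustrative GitHub App ID
  installID: "7891011"                 # illustrative installation ID
  url: https://github.com/             # optional, defaults to https://github.com/
  auth:
    privatKey:                         # field name as defined in the CRD
      secretRef:
        name: github-app-private-key   # example Secret holding the App PEM key
        key: key.pem
```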

View File

@ -57,6 +57,13 @@ spec:
description: Secret Data that should be pushed to providers
items:
properties:
conversionStrategy:
default: None
description: Used to define a conversion Strategy for the secret keys
enum:
- None
- ReverseUnicode
type: string
match:
description: Match a given Secret Key to be pushed to the provider.
properties:
@ -312,6 +319,13 @@ spec:
additionalProperties:
additionalProperties:
properties:
conversionStrategy:
default: None
description: Used to define a conversion Strategy for the secret keys
enum:
- None
- ReverseUnicode
type: string
match:
description: Match a given Secret Key to be pushed to the provider.
properties:

View File

@ -2012,10 +2012,10 @@ spec:
description: AzureKV configures this store to sync secrets using Azure Key Vault provider
properties:
authSecretRef:
description: Auth configures how the operator authenticates with Azure. Required for ServicePrincipal auth type.
description: Auth configures how the operator authenticates with Azure. Required for ServicePrincipal auth type. Optional for WorkloadIdentity.
properties:
clientId:
description: The Azure clientId of the service principle used for authentication.
description: The Azure clientId of the service principle or managed identity used for authentication.
properties:
key:
description: |-
@ -2048,6 +2048,23 @@ spec:
to the namespace of the referent.
type: string
type: object
tenantId:
description: The Azure tenantId of the managed identity used for authentication.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
type: object
authType:
default: ServicePrincipal
@ -2102,7 +2119,7 @@ spec:
- name
type: object
tenantId:
description: TenantID configures the Azure Tenant to send requests to. Required for ServicePrincipal auth type.
description: TenantID configures the Azure Tenant to send requests to. Required for ServicePrincipal auth type. Optional for WorkloadIdentity.
type: string
vaultUrl:
description: Vault Url from which the secrets to be fetched from.
@ -3040,6 +3057,60 @@ spec:
- region
- vault
type: object
passbolt:
properties:
auth:
description: Auth defines the information necessary to authenticate against Passbolt Server
properties:
passwordSecretRef:
description: |-
A reference to a specific 'key' within a Secret resource,
In some instances, `key` is a required field.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
privateKeySecretRef:
description: |-
A reference to a specific 'key' within a Secret resource,
In some instances, `key` is a required field.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
required:
- passwordSecretRef
- privateKeySecretRef
type: object
host:
description: Host defines the Passbolt Server to connect to
type: string
required:
- auth
- host
type: object
passworddepot:
description: Configures a store to sync secrets with a Password Depot instance.
properties:

View File

@ -53,8 +53,10 @@ rules:
- "ecrauthorizationtokens"
- "fakes"
- "gcraccesstokens"
- "githubaccesstokens"
- "passwords"
- "vaultdynamicsecrets"
- "webhooks"
verbs:
- "get"
- "list"
@ -145,8 +147,10 @@ rules:
- "ecrauthorizationtokens"
- "fakes"
- "gcraccesstokens"
- "githubaccesstokens"
- "passwords"
- "vaultdynamicsecrets"
- "webhooks"
verbs:
- "get"
- "watch"
@ -188,8 +192,10 @@ rules:
- "ecrauthorizationtokens"
- "fakes"
- "gcraccesstokens"
- "githubaccesstokens"
- "passwords"
- "vaultdynamicsecrets"
- "webhooks"
verbs:
- "create"
- "delete"

View File

@ -12,6 +12,12 @@ metadata:
{{- end }}
spec:
type: ClusterIP
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ .Values.service.ipFamilies | toYaml | nindent 2 }}
{{- end }}
ports:
- port: {{ .Values.metrics.service.port }}
protocol: TCP

View File

@ -8,6 +8,12 @@ metadata:
{{- include "external-secrets.labels" . | nindent 4 }}
spec:
type: ClusterIP
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ .Values.service.ipFamilies | toYaml | nindent 2 }}
{{- end }}
ports:
- port: {{ .Values.metrics.service.port }}
protocol: TCP
@ -56,6 +62,12 @@ metadata:
{{- include "external-secrets-webhook-metrics.labels" . | nindent 4 }}
spec:
type: ClusterIP
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ .Values.service.ipFamilies | toYaml | nindent 2 }}
{{- end }}
ports:
- port: {{ .Values.webhook.metrics.service.port }}
protocol: TCP
@ -105,6 +117,12 @@ metadata:
{{- include "external-secrets-cert-controller-metrics.labels" . | nindent 4 }}
spec:
type: ClusterIP
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ .Values.service.ipFamilies | toYaml | nindent 2 }}
{{- end }}
ports:
- port: {{ .Values.certController.metrics.listen.port }}
protocol: TCP

View File

@ -15,6 +15,12 @@ metadata:
{{- end }}
spec:
type: ClusterIP
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ .Values.service.ipFamilies | toYaml | nindent 2 }}
{{- end }}
ports:
- port: 443
targetPort: {{ .Values.webhook.port }}

View File

@ -7,8 +7,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets-cert-controller
app.kubernetes.io/version: v0.9.14
helm.sh/chart: external-secrets-0.9.14
app.kubernetes.io/version: v0.9.16
helm.sh/chart: external-secrets-0.9.16
name: RELEASE-NAME-external-secrets-cert-controller
namespace: NAMESPACE
spec:
@ -24,8 +24,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets-cert-controller
app.kubernetes.io/version: v0.9.14
helm.sh/chart: external-secrets-0.9.14
app.kubernetes.io/version: v0.9.16
helm.sh/chart: external-secrets-0.9.16
spec:
automountServiceAccountToken: true
containers:
@ -38,7 +38,7 @@ should match snapshot of default values:
- --secret-namespace=NAMESPACE
- --metrics-addr=:8080
- --healthz-addr=:8081
image: ghcr.io/external-secrets/external-secrets:v0.9.14
image: ghcr.io/external-secrets/external-secrets:v0.9.16
imagePullPolicy: IfNotPresent
name: cert-controller
ports:

View File

@ -7,8 +7,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets
app.kubernetes.io/version: v0.9.14
helm.sh/chart: external-secrets-0.9.14
app.kubernetes.io/version: v0.9.16
helm.sh/chart: external-secrets-0.9.16
name: RELEASE-NAME-external-secrets
namespace: NAMESPACE
spec:
@ -24,15 +24,15 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets
app.kubernetes.io/version: v0.9.14
helm.sh/chart: external-secrets-0.9.14
app.kubernetes.io/version: v0.9.16
helm.sh/chart: external-secrets-0.9.16
spec:
automountServiceAccountToken: true
containers:
- args:
- --concurrent=1
- --metrics-addr=:8080
image: ghcr.io/external-secrets/external-secrets:v0.9.14
image: ghcr.io/external-secrets/external-secrets:v0.9.16
imagePullPolicy: IfNotPresent
name: external-secrets
ports:

View File

@ -2017,10 +2017,10 @@ should match snapshot of default values:
description: AzureKV configures this store to sync secrets using Azure Key Vault provider
properties:
authSecretRef:
description: Auth configures how the operator authenticates with Azure. Required for ServicePrincipal auth type.
description: Auth configures how the operator authenticates with Azure. Required for ServicePrincipal auth type. Optional for WorkloadIdentity.
properties:
clientId:
description: The Azure clientId of the service principle used for authentication.
description: The Azure clientId of the service principle or managed identity used for authentication.
properties:
key:
description: |-
@ -2053,6 +2053,23 @@ should match snapshot of default values:
to the namespace of the referent.
type: string
type: object
tenantId:
description: The Azure tenantId of the managed identity used for authentication.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
type: object
authType:
default: ServicePrincipal
@ -2107,7 +2124,7 @@ should match snapshot of default values:
- name
type: object
tenantId:
description: TenantID configures the Azure Tenant to send requests to. Required for ServicePrincipal auth type.
description: TenantID configures the Azure Tenant to send requests to. Required for ServicePrincipal auth type. Optional for WorkloadIdentity.
type: string
vaultUrl:
description: Vault Url from which the secrets to be fetched from.
@ -3045,6 +3062,60 @@ should match snapshot of default values:
- region
- vault
type: object
passbolt:
properties:
auth:
description: Auth defines the information necessary to authenticate against Passbolt Server
properties:
passwordSecretRef:
description: |-
A reference to a specific 'key' within a Secret resource,
In some instances, `key` is a required field.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
privateKeySecretRef:
description: |-
A reference to a specific 'key' within a Secret resource,
In some instances, `key` is a required field.
properties:
key:
description: |-
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
defaulted, in others it may be required.
type: string
name:
description: The name of the Secret resource being referred to.
type: string
namespace:
description: |-
Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
to the namespace of the referent.
type: string
type: object
required:
- passwordSecretRef
- privateKeySecretRef
type: object
host:
description: Host defines the Passbolt Server to connect to
type: string
required:
- auth
- host
type: object
passworddepot:
description: Configures a store to sync secrets with a Password Depot instance.
properties:

View File

@ -7,8 +7,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets-webhook
app.kubernetes.io/version: v0.9.14
helm.sh/chart: external-secrets-0.9.14
app.kubernetes.io/version: v0.9.16
helm.sh/chart: external-secrets-0.9.16
name: RELEASE-NAME-external-secrets-webhook
namespace: NAMESPACE
spec:
@ -24,8 +24,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets-webhook
app.kubernetes.io/version: v0.9.14
helm.sh/chart: external-secrets-0.9.14
app.kubernetes.io/version: v0.9.16
helm.sh/chart: external-secrets-0.9.16
spec:
automountServiceAccountToken: true
containers:
@ -37,7 +37,7 @@ should match snapshot of default values:
- --check-interval=5m
- --metrics-addr=:8080
- --healthz-addr=:8081
image: ghcr.io/external-secrets/external-secrets:v0.9.14
image: ghcr.io/external-secrets/external-secrets:v0.9.16
imagePullPolicy: IfNotPresent
name: webhook
ports:
@ -81,8 +81,8 @@ should match snapshot of default values:
app.kubernetes.io/instance: RELEASE-NAME
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: external-secrets-webhook
app.kubernetes.io/version: v0.9.14
app.kubernetes.io/version: v0.9.16
external-secrets.io/component: webhook
helm.sh/chart: external-secrets-0.9.14
helm.sh/chart: external-secrets-0.9.16
name: RELEASE-NAME-external-secrets-webhook
namespace: NAMESPACE

View File

@ -78,6 +78,12 @@ createOperator: true
# a time.
concurrent: 1
service:
# -- Set the ip family policy to configure dual-stack see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services)
ipFamilyPolicy: ""
# -- Sets the families that should be supported and the order in which they should be applied to ClusterIP as well. Can be IPv4 and/or IPv6.
ipFamilies: []
serviceAccount:
# -- Specifies whether a service account should be created.
create: true
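
These defaults pair with the `ipFamilyPolicy`/`ipFamilies` blocks added to the Service templates above. A hedged example override for a dual-stack cluster; the values themselves are the standard Kubernetes options, not chart-specific:

```
# values override (sketch)
service:
  ipFamilyPolicy: PreferDualStack   # SingleStack, PreferDualStack or RequireDualStack
  ipFamilies:
    - IPv4
    - IPv6
```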

View File

@ -12,6 +12,10 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
The changelog until v1.5.7 was auto-generated based on git commits.
Those entries include a reference to the git commit to be able to get more details.
## 5.1.6
Update `jenkins/jenkins` to version `2.440.3-jdk17`
## 5.1.5
Fix Prometheus controller name.

View File

@ -1,10 +1,10 @@
annotations:
artifacthub.io/category: integration-delivery
artifacthub.io/changes: |
- Fix Prometheus controller name.
- Update `jenkins/jenkins` to version `2.440.3-jdk17`
artifacthub.io/images: |
- name: jenkins
image: docker.io/jenkins/jenkins:2.440.2-jdk17
image: docker.io/jenkins/jenkins:2.440.3-jdk17
- name: k8s-sidecar
image: docker.io/kiwigrid/k8s-sidecar:1.26.1
- name: inbound-agent
@ -22,7 +22,7 @@ annotations:
catalog.cattle.io/kube-version: '>=1.14-0'
catalog.cattle.io/release-name: jenkins
apiVersion: v2
appVersion: 2.440.2
appVersion: 2.440.3
description: 'Jenkins - Build great things at any scale! As the leading open source
automation server, Jenkins provides over 1800 plugins to support building, deploying
and automating any project. '
@ -50,4 +50,4 @@ sources:
- https://github.com/maorfr/kube-tasks
- https://github.com/jenkinsci/configuration-as-code-plugin
type: application
version: 5.1.5
version: 5.1.6

View File

@ -7,9 +7,9 @@ annotations:
catalog.cattle.io/featured: "1"
catalog.cattle.io/release-name: cost-analyzer
apiVersion: v2
appVersion: 2.2.1
appVersion: 2.2.2
description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor
cloud costs.
icon: https://partner-charts.rancher.io/assets/logos/kubecost.png
name: cost-analyzer
version: 2.2.1
version: 2.2.2

View File

@ -64,14 +64,6 @@ Kubecost 2.0 preconditions
{{- fail "\n\nYou are attempting to upgrade to Kubecost 2.x.\nKubecost no longer includes Thanos by default. \nPlease see https://docs.kubecost.com/install-and-configure/install/kubecostv2 for more information.\nIf you have any questions or concerns, please reach out to us at product@kubecost.com" -}}
{{- end -}}
{{- if or (((.Values.global).amp).enabled) (((.Values.global).gmp).enabled) (((.Values.global).thanos).queryService) (((.Values.global).mimirProxy).enabled) -}}
{{- if (not (.Values.federatedETL).federatedCluster) -}}
{{- if (not (.Values.upgrade).toV2) -}}
{{- fail "\n\nMulti-Cluster-Prometheus Error:\nYou are attempting to upgrade to Kubecost 2.x\nSupport for multi-cluster Prometheus (Thanos/AMP/GMP/mimir/etc) without using `Kubecost Federated ETL Object Storage` will be added in future release. \nIf this is a single cluster Kubecost environment, upgrading is supported using a flag to acknowledge this change.\nMore information can be found here: \nhttps://docs.kubecost.com/install-and-configure/install/kubecostv2\nIf you have any questions or concerns, please reach out to us at product@kubecost.com\n\nWhen ready to upgrade, add `--set upgrade.toV2=true`." -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if or ((.Values.saml).rbac).enabled ((.Values.oidc).rbac).enabled -}}
{{- if (not (.Values.upgrade).toV2) -}}
{{- fail "\n\nSSO with RBAC is enabled.\nNote that Kubecost 2.x has significant architectural changes that may impact RBAC.\nThis should be tested before giving end-users access to the UI.\nKubecost has tested various configurations and believe that 2.x will be 100% compatible with existing configurations.\nRefer to the following documentation for more information: https://docs.kubecost.com/install-and-configure/install/kubecostv2\n\nWhen ready to upgrade, add `--set upgrade.toV2=true`." -}}
@ -83,9 +75,6 @@ Kubecost 2.0 preconditions
{{- end -}}
{{- if (.Values.agent) -}}
{{- fail "\n\nKubecost 2.0 Does not support Thanos based agents. For Thanos, please continue to use 1.108.x.\nConsider moving to Kubecost Federated ETL based agents.\nRefer to the following documentation for more information: https://docs.kubecost.com/install-and-configure/install/kubecostv2\nSupport for Thanos agents is under consideration.\nIf you have any questions or concerns, please reach out to us at product@kubecost.com" -}}
{{- end -}}
{{- if .Values.kubecostModel.openSourceOnly -}}
{{- fail "In Kubecost 2.0, kubecostModel.openSourceOnly is not supported" -}}
{{- end -}}
@ -1018,6 +1007,8 @@ Begin Kubecost 2.0 templates
value: "false" # this container should never run KC's concept of "ETL"
- name: CLOUD_PROVIDER_API_KEY
value: "AIzaSyDXQPG_MHUEy9neR7stolq6l0ujXmjJlvk" # The GCP Pricing API key.This GCP api key is expected to be here and is limited to accessing google's billing API.'
- name: READ_ONLY
value: {{ (quote .Values.readonly) | default (quote false) }}
{{- if .Values.systemProxy.enabled }}
- name: HTTP_PROXY
value: {{ .Values.systemProxy.httpProxyUrl }}
@ -1216,6 +1207,8 @@ Begin Kubecost 2.0 templates
- name: FEDERATED_CLUSTER
value: "true"
{{- end}}
- name: ETL_DAILY_STORE_DURATION_DAYS
value: {{ (quote .Values.kubecostModel.etlDailyStoreDurationDays) | default (quote 91) }}
- name: CLOUD_COST_REFRESH_RATE_HOURS
value: {{ .Values.kubecostAggregator.cloudCost.refreshRateHours | default 6 | quote }}
- name: CLOUD_COST_QUERY_WINDOW_DAYS
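
The two new environment variables above are fed from optional values: `READ_ONLY` from a top-level `readonly` flag (defaulting to `false`) and `ETL_DAILY_STORE_DURATION_DAYS` from `kubecostModel.etlDailyStoreDurationDays` (defaulting to `91`). A hedged values sketch; both keys are optional:

```
# values override (sketch)
readonly: true                        # disable updates from the frontend UI / POST requests
kubecostModel:
  etlDailyStoreDurationDays: 120      # chart default is 91 when unset
```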

View File

@ -586,7 +586,8 @@ spec:
- name: azure-storage-config
mountPath: /var/azure-storage-config
{{- end }}
{{- if or (.Values.kubecostProductConfigs.cloudIntegrationSecret) (.Values.kubecostProductConfigs.cloudIntegrationJSON) }}
# TODO remove this if-clause when CloudCost has been removed from Opencost Cost-Model
{{- if or (.Values.kubecostProductConfigs).cloudIntegrationSecret (.Values.kubecostProductConfigs).cloudIntegrationJSON ((.Values.kubecostProductConfigs).athenaBucketName) }}
- name: cloud-integration
mountPath: /var/configs/cloud-integration
{{- end }}

View File

@ -368,7 +368,7 @@ systemProxy:
kubecostFrontend:
enabled: true
deployMethod: singlepod haMode or singlepod - haMode is currently only supported with Enterprise tier
deployMethod: singlepod # haMode or singlepod - haMode is currently only supported with Enterprise tier
haReplicas: 2 # only used with haMode
image: "gcr.io/kubecost1/frontend"
imagePullPolicy: Always
@ -3122,7 +3122,8 @@ costEventsAudit:
enabled: false
## Disable updates to kubecost from the frontend UI and via POST request
##
## This feature is considered beta, entrprise users should use teams:
## https://docs.kubecost.com/using-kubecost/navigating-the-kubecost-ui/teams
# readonly: false
# # These configs can also be set from the Settings page in the Kubecost product

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/namespace: kuma-system
catalog.cattle.io/release-name: kuma
apiVersion: v2
appVersion: 2.6.5
appVersion: 2.7.0
description: A Helm chart for the Kuma Control Plane
home: https://github.com/kumahq/kuma
icon: https://kuma.io/assets/images/brand/kuma-logo-new.svg
@ -12,12 +12,15 @@ keywords:
- service mesh
- control plane
maintainers:
- email: austin.cawley@gmail.com
name: austince
- email: jakub.dyszkiewicz@konghq.com
name: jakubdyszkiewicz
- email: nikolay.nikolaev@konghq.com
name: nickolaev
name: Jakub Dyszkiewicz
url: https://github.com/jakubdyszkiewicz
- email: charly.molter@konghq.com
name: Charly Molter
url: https://github.com/lahabana
- email: michael.beaumont@konghq.com
name: Mike Beaumont
url: https://github.com/michaelbeaumont
name: kuma
type: application
version: 2.6.5
version: 2.7.0

View File

@ -2,7 +2,7 @@
A Helm chart for the Kuma Control Plane
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 2.6.5](https://img.shields.io/badge/Version-2.6.5-informational?style=flat-square) ![AppVersion: 2.6.5](https://img.shields.io/badge/AppVersion-2.6.5-informational?style=flat-square)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 2.7.0](https://img.shields.io/badge/Version-2.7.0-informational?style=flat-square) ![AppVersion: 2.7.0](https://img.shields.io/badge/AppVersion-2.7.0-informational?style=flat-square)
**Homepage:** <https://github.com/kumahq/kuma>
@ -198,14 +198,14 @@ A Helm chart for the Kuma Control Plane
| hooks.ebpfCleanup | object | `{"containerSecurityContext":{"readOnlyRootFilesystem":false},"podSecurityContext":{"runAsNonRoot":false}}` | ebpf-cleanup hook needs write access to the root filesystem to clean ebpf programs Changing below values will potentially break ebpf cleanup completely, so be cautious when doing so. |
| hooks.ebpfCleanup.podSecurityContext | object | `{"runAsNonRoot":false}` | Security context at the pod level for crd/webhook/cleanup-ebpf |
| hooks.ebpfCleanup.containerSecurityContext | object | `{"readOnlyRootFilesystem":false}` | Security context at the container level for crd/webhook/cleanup-ebpf |
| experimental.gatewayAPI | bool | `false` | If true, it installs experimental Gateway API support |
| experimental.ebpf.enabled | bool | `false` | If true, ebpf will be used instead of using iptables to install/configure transparent proxy |
| experimental.ebpf.instanceIPEnvVarName | string | `"INSTANCE_IP"` | Name of the environmental variable which will contain the IP address of a pod |
| experimental.ebpf.bpffsPath | string | `"/sys/fs/bpf"` | Path where BPF file system should be mounted |
| experimental.ebpf.cgroupPath | string | `"/sys/fs/cgroup"` | Host's cgroup2 path |
| experimental.ebpf.tcAttachIface | string | `""` | Name of the network interface which TC programs should be attached to, we'll try to automatically determine it if empty |
| experimental.ebpf.programsSourcePath | string | `"/kuma/ebpf"` | Path where compiled eBPF programs which will be installed can be found |
| experimental.ebpf.programsSourcePath | string | `"/tmp/kuma-ebpf"` | Path where compiled eBPF programs which will be installed can be found |
| experimental.deltaKds | bool | `true` | If false, it uses legacy API for resource synchronization |
| experimental.sidecarContainers | bool | `false` | If true, enable native Kubernetes sidecars. This requires at least Kubernetes v1.29 |
| postgres.port | string | `"5432"` | Postgres port, password should be provided as a secret reference in "controlPlane.secrets" with the Env value "KUMA_STORE_POSTGRES_PASSWORD". Example: controlPlane: secrets: - Secret: postgres-postgresql Key: postgresql-password Env: KUMA_STORE_POSTGRES_PASSWORD |
| postgres.tls.mode | string | `"disable"` | Mode of TLS connection. Available values are: "disable", "verifyNone", "verifyCa", "verifyFull" |
| postgres.tls.disableSSLSNI | bool | `false` | Whether to disable SNI the postgres `sslsni` option. |

View File

@ -140,6 +140,11 @@ spec:
Name of the referenced resource. Can only be used with kinds: `MeshService`,
`MeshServiceSubset` and `MeshGatewayRoute`
type: string
port:
description: Port is only supported when this
ref refers to a real MeshService object
format: int32
type: integer
proxyTypes:
description: |-
ProxyTypes specifies the data plane types that are subject to the policy. When not specified,
@ -220,9 +225,7 @@ spec:
requestMirror:
properties:
backendRef:
description: TargetRef defines structure
that allows attaching policy to various
objects
description: TODO forbid weight
properties:
kind:
description: Kind of the referenced
@ -244,6 +247,12 @@ spec:
Name of the referenced resource. Can only be used with kinds: `MeshService`,
`MeshServiceSubset` and `MeshGatewayRoute`
type: string
port:
description: Port is only supported
when this ref refers to a real MeshService
object
format: int32
type: integer
proxyTypes:
description: |-
ProxyTypes specifies the data plane types that are subject to the policy. When not specified,
@ -262,6 +271,10 @@ spec:
Tags used to select a subset of proxies by tags. Can only be used with kinds
`MeshSubset` and `MeshServiceSubset`
type: object
weight:
default: 1
minimum: 0
type: integer
type: object
percentage:
anyOf:

View File

@ -85,6 +85,10 @@ spec:
endpoint:
description: Endpoint for OpenTelemetry collector
type: string
refreshInterval:
description: RefreshInterval defines how frequent metrics
should be pushed to collector
type: string
required:
- endpoint
type: object
@ -146,10 +150,76 @@ spec:
at least once, gauges changed at least once, and histograms added to at
least once). If true will scrape all metrics (even the ones with zeros).
type: boolean
regex:
description: Regex that will be used to filter sidecar metrics.
It uses Google RE2 engine https://github.com/google/re2
type: string
profiles:
description: Profiles allows to customize which metrics are
published.
properties:
appendProfiles:
description: AppendProfiles allows to combine the metrics
from multiple predefined profiles.
items:
properties:
name:
description: 'Name of the predefined profile, one
of: all, basic, none'
enum:
- All
- Basic
- None
type: string
required:
- name
type: object
type: array
exclude:
description: |-
Exclude makes it possible to exclude groups of metrics from a resulting profile.
Exclude is subordinate to Include.
items:
properties:
match:
description: Match is the value used to match using
particular Type
type: string
type:
description: 'Type defined the type of selector,
one of: prefix, regex, exact'
enum:
- Prefix
- Regex
- Exact
- Contains
type: string
required:
- match
- type
type: object
type: array
include:
description: |-
Include makes it possible to include additional metrics in a selected profiles.
Include takes precedence over Exclude.
items:
properties:
match:
description: Match is the value used to match using
particular Type
type: string
type:
description: 'Type defined the type of selector,
one of: prefix, regex, exact'
enum:
- Prefix
- Regex
- Exact
- Contains
type: string
required:
- match
- type
type: object
type: array
type: object
type: object
type: object
targetRef:

View File

@ -0,0 +1,100 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: meshservices.kuma.io
spec:
group: kuma.io
names:
categories:
- kuma
kind: MeshService
listKind: MeshServiceList
plural: meshservices
singular: meshservice
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Spec is the specification of the Kuma MeshService resource.
properties:
ports:
items:
properties:
port:
format: int32
type: integer
protocol:
default: tcp
description: Protocol identifies a protocol supported by a service.
type: string
targetPort:
format: int32
type: integer
required:
- port
type: object
type: array
x-kubernetes-list-map-keys:
- port
- protocol
x-kubernetes-list-type: map
selector:
properties:
dataplaneTags:
additionalProperties:
type: string
type: object
type: object
type: object
status:
description: Status is the current status of the Kuma MeshService resource.
properties:
addresses:
items:
properties:
hostname:
type: string
type: object
type: array
tls:
properties:
status:
enum:
- Ready
- NotReady
type: string
type: object
vips:
items:
properties:
ip:
type: string
type: object
type: array
type: object
type: object
served: true
storage: true
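
The new MeshService CRD is namespaced, with `spec.ports` (a list-map keyed by port and protocol) and a `spec.selector.dataplaneTags` map. A minimal sketch of a conforming resource; the name, namespace, and tag values are illustrative:

```
apiVersion: kuma.io/v1alpha1
kind: MeshService
metadata:
  name: backend            # example name
  namespace: kuma-demo     # example namespace
spec:
  selector:
    dataplaneTags:
      app: backend         # illustrative dataplane tag
  ports:
    - port: 80
      targetPort: 8080
      protocol: tcp        # schema default
```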

View File

@ -132,6 +132,11 @@ spec:
Name of the referenced resource. Can only be used with kinds: `MeshService`,
`MeshServiceSubset` and `MeshGatewayRoute`
type: string
port:
description: Port is only supported when this
ref refers to a real MeshService object
format: int32
type: integer
proxyTypes:
description: |-
ProxyTypes specifies the data plane types that are subject to the policy. When not specified,

View File

@ -24,7 +24,7 @@ You can access the control-plane via either the GUI, kubectl, the HTTP API, or t
update the CRDs if the new {{ include "kuma.name" . }} version has changes
to the CRDs. You can achieve this by calling the following command:
kumactl install crds --no-config{{ if .Values.experimental.gatewayAPI }} --experimental-gatewayapi{{ end }} | kubectl apply -f
kumactl install crds --no-config | kubectl apply -f
{{- if and .Values.experimental.ebpf.enabled (not .Values.cni.enabled) }}

View File

@ -184,6 +184,16 @@ returns: formatted image string
{{- define "kuma.parentSecrets" -}}
{{- end -}}
{{- define "kuma.pluginPoliciesEnabled" -}}
{{- $list := list -}}
{{- range $k, $v := .Values.plugins.policies -}}
{{- if $v -}}
{{- $list = append $list (printf "%s" $k) -}}
{{- end -}}
{{- end -}}
{{ join "," $list }}
{{- end -}}
{{- define "kuma.defaultEnv" -}}
env:
{{ include "kuma.parentEnv" . }}
@ -255,8 +265,8 @@ env:
value: "false"
- name: KUMA_RUNTIME_KUBERNETES_SERVICE_ACCOUNT_NAME
value: "system:serviceaccount:{{ .Release.Namespace }}:{{ include "kuma.name" . }}-control-plane"
{{- if .Values.experimental.gatewayAPI }}
- name: KUMA_EXPERIMENTAL_GATEWAY_API
{{- if .Values.experimental.sidecarContainers }}
- name: KUMA_EXPERIMENTAL_SIDECAR_CONTAINERS
value: "true"
{{- end }}
{{- if .Values.cni.enabled }}
@ -287,6 +297,8 @@ env:
- name: KUMA_MULTIZONE_ZONE_KDS_TLS_SKIP_VERIFY
value: "true"
{{- end }}
- name: KUMA_PLUGIN_POLICIES_ENABLED
value: {{ include "kuma.pluginPoliciesEnabled" . | quote }}
{{- end }}
{{- define "kuma.controlPlane.tls.general.caSecretName" -}}
@ -304,6 +316,8 @@ env:
{{ end }}
env:
- name: KUMA_PLUGIN_POLICIES_ENABLED
value: {{ include "kuma.pluginPoliciesEnabled" . | quote }}
- name: KUMA_GENERAL_WORK_DIR
value: "/tmp/kuma"
- name: KUMA_ENVIRONMENT
@ -358,6 +372,8 @@ env:
- name: KUMA_MULTIZONE_GLOBAL_KDS_TLS_KEY_FILE
value: /var/run/secrets/kuma.io/kds-server-tls-cert/tls.key
{{- end }}
- name: KUMA_STORE_POSTGRES_TLS_MODE
value: {{ .Values.postgres.tls.mode }}
{{- if or (eq .Values.postgres.tls.mode "verifyCa") (eq .Values.postgres.tls.mode "verifyFull") }}
{{- if empty .Values.postgres.tls.caSecretName }}
{{ fail "if mode is 'verifyCa' or 'verifyFull' then you must provide .Values.postgres.tls.caSecretName" }}
@ -372,8 +388,6 @@ env:
- name: KUMA_STORE_POSTGRES_TLS_CA_PATH
value: /var/run/secrets/kuma.io/postgres-tls-cert/ca.crt
{{- end }}
- name: KUMA_STORE_POSTGRES_TLS_MODE
value: {{ .Values.postgres.tls.mode }}
{{- if .Values.postgres.tls.disableSSLSNI }}
- name: KUMA_STORE_POSTGRES_TLS_DISABLE_SSLSNI
value: {{ .Values.postgres.tls.disableSSLSNI }}

View File

@ -47,5 +47,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "kuma.name" . }}-cni
namespace: kube-system
namespace: {{ .Values.cni.namespace }}
{{- end }}

View File

@ -29,9 +29,7 @@ rules:
- pods
- configmaps
- nodes
{{- if .Values.experimental.gatewayAPI }}
- secrets
{{- end }}
verbs:
- get
- list
@ -144,11 +142,16 @@ rules:
- meshgateways
- meshgatewayroutes
- meshgatewayinstances
{{- if .Values.experimental.gatewayAPI }}
- meshgatewayconfigs
{{- end }}
{{- range $policy, $empty := .Values.plugins.policies }}
{{- range $policy, $v := .Values.plugins.policies }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
{{- range $policy, $v := .Values.plugins.resources }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
verbs:
- get

View File

@ -79,9 +79,17 @@ webhooks:
- UPDATE
resources:
- meshes
{{- range $policy, $empty := .Values.plugins.policies }}
- meshgateways
{{- range $policy, $v := .Values.plugins.policies }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
{{- range $policy, $v := .Values.plugins.resources }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
sideEffects: None
- name: owner-reference.kuma-admission.kuma.io
admissionReviewVersions: ["v1"]
@ -120,9 +128,16 @@ webhooks:
- trafficroutes
- traffictraces
- virtualoutbounds
{{- range $policy, $empty := .Values.plugins.policies }}
{{- range $policy, $v := .Values.plugins.policies }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
{{- range $policy, $v := .Values.plugins.resources }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
{{ .Values.controlPlane.webhooks.ownerReference.additionalRules | nindent 6 }}
sideEffects: None
{{- if ne .Values.controlPlane.mode "global" }}
@ -232,9 +247,16 @@ webhooks:
- virtualoutbounds
- zones
- containerpatches
{{- range $policy, $empty := .Values.plugins.policies }}
{{- range $policy, $v := .Values.plugins.policies }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
{{- range $policy, $v := .Values.plugins.resources }}
{{- if $v }}
- {{ $policy }}
{{- end}}
{{- end}}
{{ .Values.controlPlane.webhooks.validator.additionalRules | nindent 6 }}
sideEffects: None
{{- if ne .Values.controlPlane.mode "global" }}

View File

@ -16,5 +16,5 @@ spec:
selector:
matchLabels:
{{- include "kuma.selectorLabels" . | nindent 6 }}
app: kuma-egress
app: {{ include "kuma.name" . }}-egress
{{ end }}

View File

@ -1,4 +1,4 @@
{{- if and .Values.experimental.gatewayAPI (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1/GatewayClass") }}
{{- if .Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1/GatewayClass" }}
---
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
@ -6,7 +6,7 @@ metadata:
name: kuma
spec:
controllerName: "gateways.kuma.io/controller"
{{- else if and .Values.experimental.gatewayAPI (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1/GatewayClass") }}
{{- else if .Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1/GatewayClass" }}
---
apiVersion: gateway.networking.k8s.io/v1beta1
kind: GatewayClass

View File

@ -16,5 +16,5 @@ spec:
selector:
matchLabels:
{{- include "kuma.selectorLabels" . | nindent 6 }}
app: kuma-ingress
app: {{ include "kuma.name" . }}-ingress
{{ end }}

View File

@ -85,7 +85,7 @@ data:
save_crds.sh: |
set -e
crds="$(kumactl install crds --no-config {{ if .Values.experimental.gatewayAPI }}--experimental-gatewayapi{{end}})"
crds="$(kumactl install crds --no-config)"
if [ -n "${crds}" ]; then
echo "found crds - saving to /kuma/crds/crds.yaml"

View File

@ -678,8 +678,6 @@ hooks:
readOnlyRootFilesystem: false
experimental:
# -- If true, it installs experimental Gateway API support
gatewayAPI: false
# Configuration for the experimental ebpf mode for transparent proxy
ebpf:
# -- If true, ebpf will be used instead of using iptables to install/configure transparent proxy
@ -693,9 +691,12 @@ experimental:
# -- Name of the network interface which TC programs should be attached to, we'll try to automatically determine it if empty
tcAttachIface: ""
# -- Path where compiled eBPF programs which will be installed can be found
programsSourcePath: /kuma/ebpf
programsSourcePath: /tmp/kuma-ebpf
# -- If false, it uses legacy API for resource synchronization
deltaKds: true
# -- If true, enable native Kubernetes sidecars. This requires at least
# Kubernetes v1.29
sidecarContainers: false
# Postgres' settings for universal control plane on k8s
postgres:
@ -721,18 +722,20 @@ postgres:
# @ignored for helm-docs
plugins:
resources:
meshservices: true
policies:
meshaccesslogs: {}
meshcircuitbreakers: {}
meshfaultinjections: {}
meshhealthchecks: {}
meshhttproutes: {}
meshloadbalancingstrategies: {}
meshmetrics: {}
meshproxypatches: {}
meshratelimits: {}
meshretries: {}
meshtcproutes: {}
meshtimeouts: {}
meshtraces: {}
meshtrafficpermissions: {}
meshaccesslogs: true
meshcircuitbreakers: true
meshfaultinjections: true
meshhealthchecks: true
meshhttproutes: true
meshloadbalancingstrategies: true
meshmetrics: true
meshproxypatches: true
meshratelimits: true
meshretries: true
meshtcproutes: true
meshtimeouts: true
meshtraces: true
meshtrafficpermissions: true

View File

@ -28,4 +28,4 @@ name: loft
sources:
- https://github.com/loft-sh/loft
type: application
version: 3.4.4
version: 3.4.5

View File

@ -17,4 +17,4 @@ maintainers:
- email: natalia.marukovich@percona.com
name: nmarukovich
name: pxc-db
version: 1.14.2
version: 1.14.3

View File

@ -52,7 +52,7 @@ The chart can be customized using the following configurable parameters:
| `pxc.size` | PXC Cluster target member (pod) quantity. Can't be even if `allowUnsafeConfigurations` is `true` | `3` |
| `pxc.clusterSecretName` | Specify if you want to use custom or Operator generated users secret (if the one specified doesn't exist) | `` |
| `pxc.image.repository` | PXC Container image repository | `percona/percona-xtradb-cluster` |
| `pxc.image.tag` | PXC Container image tag | `8.0.35-27.1` |
| `pxc.image.tag` | PXC Container image tag | `8.0.36-28.1` |
| `pxc.imagePullPolicy` | The policy used to update images | `` |
| `pxc.autoRecovery` | Enable full cluster crash auto recovery | `true` |
| `pxc.expose.enabled` | Enable or disable exposing `Percona XtraDB Cluster` nodes with dedicated IP addresses | `true` |
@ -175,7 +175,7 @@ The chart can be customized using the following configurable parameters:
| |
| `proxysql.enabled` | Use ProxySQL as TCP proxy for PXC cluster | `false` |
| `proxysql.size` | ProxySQL target pod quantity. Can't be even if `allowUnsafeConfigurations` is `true` | `3` |
| `proxysql.image` | ProxySQL Container image | `percona/percona-xtradb-cluster-operator:1.14.0-proxysql` |
| `proxysql.image` | ProxySQL Container image | `percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2` |
| `proxysql.imagePullPolicy` | The policy used to update images | `` |
| `proxysql.imagePullSecrets` | ProxySQL Container pull secret | `[]` |
| `proxysql.configuration` | User defined ProxySQL options according to ProxySQL configuration file syntax | `` |
@ -230,7 +230,7 @@ The chart can be customized using the following configurable parameters:
| |
| `pmm.enabled` | Enable integration with [Percona Monitoring and Management software](https://www.percona.com/doc/kubernetes-operator-for-pxc/monitoring.html) | `false` |
| `pmm.image.repository` | PMM Container image repository | `percona/pmm-client` |
| `pmm.image.tag` | PMM Container image tag | `2.41.1` |
| `pmm.image.tag` | PMM Container image tag | `2.41.2` |
| `pmm.imagePullSecrets` | PMM Container pull secret | `[]` |
| `pmm.imagePullPolicy` | The policy used to update images | `` |
| `pmm.serverHost` | PMM server related K8S service hostname | `monitoring-service` |

View File

@ -55,7 +55,7 @@ pxc:
size: 3
image:
repository: percona/percona-xtradb-cluster
tag: 8.0.35-27.1
tag: 8.0.36-28.1
# imagePullPolicy: Always
autoRecovery: true
# expose:
@ -367,7 +367,7 @@ haproxy:
proxysql:
enabled: false
size: 3
image: ""
image: "percona/percona-xtradb-cluster-operator:1.14.0-proxysql2.5.5-1.2"
# imagePullPolicy: Always
imagePullSecrets: []
# configuration: |
@ -543,7 +543,7 @@ pmm:
enabled: false
image:
repository: percona/pmm-client
tag: 2.41.0
tag: 2.41.2
# imagePullPolicy: Always
imagePullSecrets: []
serverHost: monitoring-service

View File

@ -37,4 +37,4 @@ name: redpanda
sources:
- https://github.com/redpanda-data/helm-charts
type: application
version: 5.7.40
version: 5.7.41

View File

@ -3,7 +3,7 @@
description: Find the default values and descriptions of settings in the Redpanda Helm chart.
---
![Version: 5.7.40](https://img.shields.io/badge/Version-5.7.40-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v23.3.11](https://img.shields.io/badge/AppVersion-v23.3.11-informational?style=flat-square)
![Version: 5.7.41](https://img.shields.io/badge/Version-5.7.41-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v23.3.11](https://img.shields.io/badge/AppVersion-v23.3.11-informational?style=flat-square)
This page describes the official Redpanda Helm Chart. In particular, this page describes the contents of the chart's [`values.yaml` file](https://github.com/redpanda-data/helm-charts/blob/main/charts/redpanda/values.yaml). Each of the settings is listed and described on this page, along with any default values.
@ -625,7 +625,7 @@ CPU resources. For details, see the [Pod resources documentation](https://docs.r
### [resources.cpu.cores](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=resources.cpu.cores)
Redpanda makes use of a thread per core model. For details, see this [blog](https://redpanda.com/blog/tpc-buffers). For this reason, Redpanda should only be given full cores. Note: You can increase cores, but decreasing cores is not currently supported. See the [GitHub issue](https://github.com/redpanda-data/redpanda/issues/350). This setting is equivalent to `--smp`, `resources.requests.cpu`, and `resources.limits.cpu`. For production, use `4` or greater.
Redpanda makes use of a thread per core model. For details, see this [blog](https://redpanda.com/blog/tpc-buffers). For this reason, Redpanda should only be given full cores. Note: You can increase cores, but decreasing cores is not currently supported. See the [GitHub issue](https://github.com/redpanda-data/redpanda/issues/350). This setting is equivalent to `--smp`, `resources.requests.cpu`, and `resources.limits.cpu`. For production, use `4` or greater. To maximize efficiency, use the `static` CPU manager policy by specifying an even integer for CPU resource requests and limits. This policy gives the Pods running Redpanda brokers access to exclusive CPUs on the node. See https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy.
**Default:** `1`
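For illustration, a `values.yaml` fragment along these lines applies the guidance above (the core count shown is an example, not a chart default):

```yaml
# Example only: whole, even core counts let the kubelet's static CPU manager
# policy pin exclusive CPUs to the Redpanda brokers.
resources:
  cpu:
    cores: 4   # maps to --smp and to the broker container's CPU requests/limits
```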
@ -639,6 +639,12 @@ Memory resources For details, see the [Pod resources documentation](https://docs
{"container":{"max":"2.5Gi"}}
```
### [resources.memory.container](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=resources.memory.container)
Enables memory locking (`enable_memory_locking`, default `false`). For production, set to `true`. It is recommended to have at least 2Gi of memory per core for the Redpanda binary. This memory is taken from the total memory given to each container. The Helm chart allocates 80% of the container's memory to Redpanda, leaving the rest for the Seastar subsystem (reserveMemory) and other container processes. So at least 2.5Gi per core is recommended in order to ensure Redpanda has a full 2Gi. These values affect the `--memory` and `--reserve-memory` flags passed to Redpanda and the memory requests/limits in the StatefulSet. Valid suffixes: B, K, M, G, Ki, Mi, and Gi. To create `Guaranteed` Pod QoS for Redpanda brokers, provide both container max and min values for the container. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a memory limit and a memory request. * For every container in the Pod, the memory limit must equal the memory request.
**Default:** `{"max":"2.5Gi"}`
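As a sketch of the Guaranteed-QoS guidance above, matching `min` and `max` values might look like this (figures are examples sized for a 4-core broker, not chart defaults):

```yaml
# Example only: min == max gives the broker container equal memory requests
# and limits, one prerequisite for the Guaranteed QoS class.
resources:
  memory:
    container:
      min: 10Gi   # memory request
      max: 10Gi   # memory limit; ~2.5Gi per core leaves Redpanda a full 2Gi per core
```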
### [resources.memory.container.max](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=resources.memory.container.max)
Maximum memory count for each Redpanda broker. Equivalent to `resources.limits.memory`. For production, use `10Gi` or greater.
@ -711,6 +717,8 @@ DEPRECATED Please use statefulset.podTemplate.annotations. Annotations are used
### [statefulset.initContainers.configurator.resources](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.configurator.resources)
To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a CPU limit and a CPU request. * For every container in the Pod, the CPU limit must equal the CPU request.
**Default:** `{}`
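A minimal sketch of such a resources block for the configurator init container follows (the figures are placeholders, not recommendations); the same shape applies to the other init containers and sidecars documented below:

```yaml
# Example only: every container in the Pod, init containers included, needs
# matching requests and limits for the Pod to receive Guaranteed QoS.
statefulset:
  initContainers:
    configurator:
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits:
          cpu: 100m
          memory: 128Mi
```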
### [statefulset.initContainers.extraInitContainers](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.extraInitContainers)
@ -731,6 +739,8 @@ DEPRECATED Please use statefulset.podTemplate.annotations. Annotations are used
### [statefulset.initContainers.fsValidator.resources](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.fsValidator.resources)
To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a CPU limit and a CPU request. * For every container in the Pod, the CPU limit must equal the CPU request.
**Default:** `{}`
### [statefulset.initContainers.setDataDirOwnership.enabled](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.setDataDirOwnership.enabled)
@ -745,6 +755,8 @@ In environments where root is not allowed, you cannot change the ownership of fi
### [statefulset.initContainers.setDataDirOwnership.resources](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.setDataDirOwnership.resources)
To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a CPU limit and a CPU request. * For every container in the Pod, the CPU limit must equal the CPU request.
**Default:** `{}`
### [statefulset.initContainers.setTieredStorageCacheDirOwnership.extraVolumeMounts](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.setTieredStorageCacheDirOwnership.extraVolumeMounts)
@ -753,6 +765,8 @@ In environments where root is not allowed, you cannot change the ownership of fi
### [statefulset.initContainers.setTieredStorageCacheDirOwnership.resources](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.setTieredStorageCacheDirOwnership.resources)
To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a CPU limit and a CPU request. * For every container in the Pod, the CPU limit must equal the CPU request.
**Default:** `{}`
### [statefulset.initContainers.tuning.extraVolumeMounts](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.tuning.extraVolumeMounts)
@ -761,6 +775,8 @@ In environments where root is not allowed, you cannot change the ownership of fi
### [statefulset.initContainers.tuning.resources](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.initContainers.tuning.resources)
To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a CPU limit and a CPU request. * For every container in the Pod, the CPU limit must equal the CPU request.
**Default:** `{}`
### [statefulset.livenessProbe.failureThreshold](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.livenessProbe.failureThreshold)
@ -883,6 +899,8 @@ Number of Redpanda brokers (Redpanda Data recommends setting this to the number
### [statefulset.sideCars.configWatcher.resources](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.configWatcher.resources)
To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a memory limit and a memory request. * For every container in the Pod, the memory limit must equal the memory request. * Every container in the Pod must have a CPU limit and a CPU request. * For every container in the Pod, the CPU limit must equal the CPU request. To maximize efficiency, use the `static` CPU manager policy by specifying an even integer for CPU resource requests and limits. This policy gives the Pods running Redpanda brokers access to exclusive CPUs on the node. For details, see https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy
**Default:** `{}`
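A comparable sketch for this sidecar (placeholder figures; whole-core CPU values would be needed if you also want the static CPU manager to pin exclusive CPUs):

```yaml
# Example only: equal requests and limits on the config-watcher sidecar keep
# the broker Pod eligible for Guaranteed QoS.
statefulset:
  sideCars:
    configWatcher:
      resources:
        requests:
          cpu: 100m
          memory: 64Mi
        limits:
          cpu: 100m
          memory: 64Mi
```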
### [statefulset.sideCars.configWatcher.securityContext](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.configWatcher.securityContext)
@ -919,6 +937,8 @@ Number of Redpanda brokers (Redpanda Data recommends setting this to the number
### [statefulset.sideCars.controllers.resources](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.controllers.resources)
To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed * Every container in the Pod must have a CPU limit and a CPU request. * For every container in the Pod, the CPU limit must equal the CPU request. To maximize efficiency, use the `static` CPU manager policy by specifying an even integer for CPU resource requests and limits. This policy gives the Pods running Redpanda brokers access to exclusive CPUs on the node. For details, see https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy
**Default:** `{}`
### [statefulset.sideCars.controllers.run[0]](https://artifacthub.io/packages/helm/redpanda-data/redpanda?modal=values&path=statefulset.sideCars.controllers.run[0])

View File

@ -115,7 +115,7 @@ spec:
- name: datadir
mountPath: /var/lib/redpanda/data
{{- if get .Values.statefulset.initContainers.fsValidator "resources" }}
resources: {{- toYaml .Values.statefulset.fsValidator.tuning.resources | nindent 12 }}
resources: {{- toYaml .Values.statefulset.fsValidator.resources | nindent 12 }}
{{- end }}
{{- end }}
{{- if (include "storage-tiered-config" .|fromJson).cloud_storage_enabled }}

View File

@ -344,6 +344,11 @@ resources:
#
# This setting is equivalent to `--smp`, `resources.requests.cpu`, and `resources.limits.cpu`.
# For production, use `4` or greater.
#
# To maximize efficiency, use the `static` CPU manager policy by specifying an even integer for
# CPU resource requests and limits. This policy gives the Pods running Redpanda brokers
# access to exclusive CPUs on the node. See
# https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy.
cores: 1
#
# -- Overprovisioned means Redpanda won't assume it has all of the provisioned CPU.
@ -358,7 +363,7 @@ resources:
# For details,
# see the [Pod resources documentation](https://docs.redpanda.com/docs/manage/kubernetes/manage-resources/#configure-memory-resources).
memory:
# Enables memory locking.
# -- Enables memory locking.
# For production, set to `true`.
# enable_memory_locking: false
#
@ -371,6 +376,11 @@ resources:
# These values affect `--memory` and `--reserve-memory` flags passed to Redpanda and the memory
# requests/limits in the StatefulSet.
# Valid suffixes: B, K, M, G, Ki, Mi, and Gi
# To create `Guaranteed` Pod QoS for Redpanda brokers, provide both container max and min values for the container.
# For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
# * Every container in the Pod must have a memory limit and a memory request.
# * For every container in the Pod, the memory limit must equal the memory request.
#
container:
# Minimum memory count for each Redpanda broker.
@ -709,6 +719,17 @@ statefulset:
sideCars:
configWatcher:
enabled: true
# -- To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
# * Every container in the Pod must have a memory limit and a memory request.
# * For every container in the Pod, the memory limit must equal the memory request.
# * Every container in the Pod must have a CPU limit and a CPU request.
# * For every container in the Pod, the CPU limit must equal the CPU request.
#
# To maximize efficiency, use the `static` CPU manager policy by specifying an even integer for
# CPU resource requests and limits. This policy gives the Pods running Redpanda brokers
# access to exclusive CPUs on the node. For details, see
# https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy
resources: {}
securityContext: {}
extraVolumeMounts: |-
@ -727,6 +748,18 @@ statefulset:
repository: docker.redpanda.com/redpandadata/redpanda-operator
# You must also enable RBAC, `rbac.enabled=true`, to deploy this sidecar
enabled: false
# -- To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
#
# * Every container in the Pod must have a CPU limit and a CPU request.
# * For every container in the Pod, the CPU limit must equal the CPU request.
#
# To maximize efficiency, use the `static` CPU manager policy by specifying an even integer for
# CPU resource requests and limits. This policy gives the Pods running Redpanda brokers
# access to exclusive CPUs on the node. For details, see
# https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy
resources: {}
securityContext: {}
healthProbeAddress: ":8085"
@ -738,21 +771,41 @@ statefulset:
fsValidator:
enabled: false
expectedFS: xfs
# -- To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
# * Every container in the Pod must have a CPU limit and a CPU request.
# * For every container in the Pod, the CPU limit must equal the CPU request.
resources: {}
extraVolumeMounts: |-
tuning:
# -- To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
# * Every container in the Pod must have a CPU limit and a CPU request.
# * For every container in the Pod, the CPU limit must equal the CPU request.
resources: {}
extraVolumeMounts: |-
setDataDirOwnership:
# -- In environments where root is not allowed, you cannot change the ownership of files and directories.
# Enable `setDataDirOwnership` when using default minikube cluster configuration.
enabled: false
# -- To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
# * Every container in the Pod must have a CPU limit and a CPU request.
# * For every container in the Pod, the CPU limit must equal the CPU request.
resources: {}
extraVolumeMounts: |-
setTieredStorageCacheDirOwnership:
# -- To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
# * Every container in the Pod must have a CPU limit and a CPU request.
# * For every container in the Pod, the CPU limit must equal the CPU request.
resources: {}
extraVolumeMounts: |-
configurator:
# -- To create `Guaranteed` Pods for Redpanda brokers, provide both requests and limits for CPU and memory. For details, see
# https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
# * Every container in the Pod must have a CPU limit and a CPU request.
# * For every container in the Pod, the CPU limit must equal the CPU request.
resources: {}
extraVolumeMounts: |-
## Additional init containers

View File

@ -4,7 +4,7 @@ annotations:
catalog.cattle.io/kube-version: '>= 1.17.0-0'
catalog.cattle.io/release-name: speedscale-operator
apiVersion: v1
appVersion: 2.1.235
appVersion: 2.1.247
description: Stress test your APIs with real world scenarios. Collect and replay
traffic without scripting.
home: https://speedscale.com
@ -24,4 +24,4 @@ maintainers:
- email: support@speedscale.com
name: Speedscale Support
name: speedscale-operator
version: 2.1.18
version: 2.1.19

View File

@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
incompatible breaking change needing manual actions.
### Upgrade to 2.1.18
### Upgrade to 2.1.19
```bash
kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.18/templates/crds/trafficreplays.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.19/templates/crds/trafficreplays.yaml
```
### Upgrade to 1.1.0

View File

@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
incompatible breaking change needing manual actions.
### Upgrade to 2.1.18
### Upgrade to 2.1.19
```bash
kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.18/templates/crds/trafficreplays.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/2.1.19/templates/crds/trafficreplays.yaml
```
### Upgrade to 1.1.0

View File

@ -20,7 +20,7 @@ clusterName: "my-cluster"
# Speedscale components image settings.
image:
registry: gcr.io/speedscale
tag: v2.1.235
tag: v2.1.247
pullPolicy: Always
# Log level for Speedscale components.

View File

@ -1 +0,0 @@
tests

View File

@ -3,20 +3,18 @@ annotations:
catalog.cattle.io/display-name: YugabyteDB
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: yugabyte
charts.openshift.io/name: yugabyte
apiVersion: v2
appVersion: 2.18.7.0-b30
apiVersion: v1
appVersion: 2.14.16.0-b17
description: YugabyteDB is the high-performance distributed SQL database for building
global, internet-scale apps.
home: https://www.yugabyte.com
icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
name: Sanketh Indarapu
- email: gjalla@yugabyte.com
name: Govardhan Reddy Jalla
- email: ram@yugabyte.com
name: Ram Sri
- email: arnav@yugabyte.com
name: Arnav Agarwal
name: yugabyte
sources:
- https://github.com/yugabyte/yugabyte-db
version: 2.18.7
version: 2.14.16

View File

@ -1 +1 @@
This chart bootstraps an RF3 YugabyteDB version 2.18.7.0-b30 cluster using the Helm Package Manager.
This chart bootstraps an RF3 Yugabyte DB version 2.14.16.0-b17 cluster using the Helm Package Manager.

View File

@ -11,209 +11,84 @@ from sys import exit
import json
import base64
import tempfile
import time
import os.path
def run_command(command_args, namespace=None, as_json=True, log_command=True):
command = ["kubectl"]
def run_command(command_args, namespace=None, as_json=True):
command = ['kubectl']
if namespace:
command.extend(["--namespace", namespace])
command.extend(['--namespace', namespace])
command.extend(command_args)
if as_json:
command.extend(["-o", "json"])
if log_command:
print("Running command: {}".format(" ".join(command)))
output = check_output(command)
if as_json:
return json.loads(output)
command.extend(['-o', 'json'])
return json.loads(check_output(command))
else:
return output.decode("utf8")
return check_output(command).decode('utf8')
def create_sa_token_secret(directory, sa_name, namespace):
"""Creates a service account token secret for sa_name in
namespace. Returns the name of the secret created.
Ref:
https://k8s.io/docs/concepts/configuration/secret/#service-account-token-secrets
"""
token_secret = {
"apiVersion": "v1",
"data": {
"do-not-delete-used-for-yugabyte-anywhere": "MQ==",
},
"kind": "Secret",
"metadata": {
"annotations": {
"kubernetes.io/service-account.name": sa_name,
},
"name": sa_name,
},
"type": "kubernetes.io/service-account-token",
}
token_secret_file_name = os.path.join(directory, "token_secret.yaml")
with open(token_secret_file_name, "w") as token_secret_file:
json.dump(token_secret, token_secret_file)
run_command(["apply", "-f", token_secret_file_name], namespace)
return sa_name
def get_secret_data(secret, namespace):
"""Returns the secret in JSON format if it has ca.crt and token in
it, else returns None. It retries 3 times with 1 second timeout
for the secret to be populated with this data.
"""
secret_data = None
num_retries = 5
timeout = 2
while True:
secret_json = run_command(["get", "secret", secret], namespace)
if "ca.crt" in secret_json["data"] and "token" in secret_json["data"]:
secret_data = secret_json
break
num_retries -= 1
if num_retries == 0:
break
print(
"Secret '{}' is not populated. Sleep {}s, ({} retries left)".format(
secret, timeout, num_retries
)
)
time.sleep(timeout)
return secret_data
def get_secrets_for_sa(sa_name, namespace):
"""Returns a list of all service account token secrets associated
with the given sa_name in the namespace.
"""
secrets = run_command(
[
"get",
"secret",
"--field-selector",
"type=kubernetes.io/service-account-token",
"-o",
'jsonpath="{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name == "'
+ sa_name
+ '")].metadata.name}"',
],
as_json=False,
)
return secrets.strip('"').split()
parser = argparse.ArgumentParser(description="Generate KubeConfig with Token")
parser.add_argument("-s", "--service_account", help="Service Account name", required=True)
parser.add_argument("-n", "--namespace", help="Kubernetes namespace", default="kube-system")
parser.add_argument("-c", "--context", help="kubectl context")
parser.add_argument("-o", "--output_file", help="output file path")
parser = argparse.ArgumentParser(description='Generate KubeConfig with Token')
parser.add_argument('-s', '--service_account', help='Service Account name', required=True)
parser.add_argument('-n', '--namespace', help='Kubernetes namespace', default='kube-system')
parser.add_argument('-c', '--context', help='kubectl context')
args = vars(parser.parse_args())
# if the context is not provided we use the current-context
context = args["context"]
context = args['context']
if context is None:
context = run_command(["config", "current-context"], args["namespace"], as_json=False)
context = run_command(['config', 'current-context'],
args['namespace'], as_json=False)
cluster_attrs = run_command(
["config", "get-contexts", context.strip(), "--no-headers"], args["namespace"], as_json=False
)
cluster_attrs = run_command(['config', 'get-contexts', context.strip(),
'--no-headers'], args['namespace'], as_json=False)
cluster_name = cluster_attrs.strip().split()[2]
endpoint = run_command(
[
"config",
"view",
"-o",
'jsonpath="{.clusters[?(@.name =="' + cluster_name + '")].cluster.server}"',
],
args["namespace"],
as_json=False,
)
service_account_info = run_command(["get", "sa", args["service_account"]], args["namespace"])
tmpdir = tempfile.TemporaryDirectory()
# Get the token and ca.crt from service account secret.
sa_secrets = list()
# Get secrets specified in the service account, there can be multiple
# of them, and not all are service account token secrets.
if "secrets" in service_account_info:
sa_secrets = [secret["name"] for secret in service_account_info["secrets"]]
# Find the existing additional service account token secrets
sa_secrets.extend(get_secrets_for_sa(args["service_account"], args["namespace"]))
endpoint = run_command(['config', 'view', '-o',
'jsonpath="{.clusters[?(@.name =="' +
cluster_name + '")].cluster.server}"'],
args['namespace'], as_json=False)
service_account_info = run_command(['get', 'sa', args['service_account']],
args['namespace'])
# some ServiceAccounts have multiple secrets, and not all of them have a
# ca.crt and a token.
sa_secrets = [secret['name'] for secret in service_account_info['secrets']]
secret_data = None
for secret in sa_secrets:
secret_data = get_secret_data(secret, args["namespace"])
if secret_data is not None:
break
# Kubernetes 1.22+ doesn't create the service account token secret by
# default, we have to create one.
secret_json = run_command(['get', 'secret', secret], args['namespace'])
if 'ca.crt' not in secret_json['data'] and 'token' not in secret_json['data']:
continue
secret_data = secret_json
if secret_data is None:
print("No usable secret found for '{}', creating one.".format(args["service_account"]))
token_secret = create_sa_token_secret(tmpdir.name, args["service_account"], args["namespace"])
secret_data = get_secret_data(token_secret, args["namespace"])
if secret_data is None:
exit(
"Failed to generate kubeconfig: No usable credentials found for '{}'.".format(
args["service_account"]
)
)
exit("No usable secret found for '{}'.".format(args['service_account']))
context_name = '{}-{}'.format(args['service_account'], cluster_name)
kube_config = '/tmp/{}.conf'.format(args['service_account'])
context_name = "{}-{}".format(args["service_account"], cluster_name)
kube_config = args["output_file"]
if not kube_config:
kube_config = "/tmp/{}.conf".format(args["service_account"])
with tempfile.NamedTemporaryFile() as ca_crt_file:
ca_crt = base64.b64decode(secret_data['data']['ca.crt'])
ca_crt_file.write(ca_crt)
ca_crt_file.flush()
# create kubeconfig entry
set_cluster_cmd = ['config', 'set-cluster', cluster_name,
'--kubeconfig={}'.format(kube_config),
'--server={}'.format(endpoint.strip('"')),
'--embed-certs=true',
'--certificate-authority={}'.format(ca_crt_file.name)]
run_command(set_cluster_cmd, as_json=False)
user_token = base64.b64decode(secret_data['data']['token']).decode('utf-8')
set_credentials_cmd = ['config', 'set-credentials', context_name,
'--token={}'.format(user_token),
'--kubeconfig={}'.format(kube_config)]
run_command(set_credentials_cmd, as_json=False)
ca_crt_file_name = os.path.join(tmpdir.name, "ca.crt")
ca_crt_file = open(ca_crt_file_name, "wb")
ca_crt_file.write(base64.b64decode(secret_data["data"]["ca.crt"]))
ca_crt_file.close()
# create kubeconfig entry
set_cluster_cmd = [
"config",
"set-cluster",
cluster_name,
"--kubeconfig={}".format(kube_config),
"--server={}".format(endpoint.strip('"')),
"--embed-certs=true",
"--certificate-authority={}".format(ca_crt_file_name),
]
run_command(set_cluster_cmd, as_json=False)
user_token = base64.b64decode(secret_data["data"]["token"]).decode("utf-8")
set_credentials_cmd = [
"config",
"set-credentials",
context_name,
"--token={}".format(user_token),
"--kubeconfig={}".format(kube_config),
]
run_command(set_credentials_cmd, as_json=False, log_command=False)
set_context_cmd = [
"config",
"set-context",
context_name,
"--cluster={}".format(cluster_name),
"--user={}".format(context_name),
"--kubeconfig={}".format(kube_config),
]
set_context_cmd = ['config', 'set-context', context_name,
'--cluster={}'.format(cluster_name),
'--user={}'.format(context_name),
'--kubeconfig={}'.format(kube_config)]
run_command(set_context_cmd, as_json=False)
use_context_cmd = ["config", "use-context", context_name, "--kubeconfig={}".format(kube_config)]
use_context_cmd = ['config', 'use-context', context_name,
'--kubeconfig={}'.format(kube_config)]
run_command(use_context_cmd, as_json=False)
print("Generated the kubeconfig file: {}".format(kube_config))

View File

@ -1,4 +0,0 @@
# OCP compatible values for yugabyte
Image:
repository: "quay.io/yugabyte/yugabyte-ubi"

View File

@ -16,7 +16,7 @@ questions:
label: YugabyteDB image repository
description: "YugabyteDB image repository"
- variable: Image.tag
default: "2.5.1.0-b153"
default: "2.14.1.0-b36"
required: true
type: string
label: YugabyteDB image tag

View File

@ -26,7 +26,7 @@ Generate common labels.
{{- define "yugabyte.labels" }}
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
release: {{ .Release.Name | quote }}
chart: {{ .Chart.Name | quote }}
chart: {{ .Values.oldNamingStyle | ternary .Chart.Name (include "yugabyte.chart" .) | quote }}
component: {{ .Values.Component | quote }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
@ -56,89 +56,6 @@ release: {{ .root.Release.Name | quote }}
{{- end }}
{{- end }}
{{/*
Create secrets in DBNamespace from other namespaces by iterating over envSecrets.
*/}}
{{- define "yugabyte.envsecrets" -}}
{{- range $v := .secretenv }}
{{- if $v.valueFrom.secretKeyRef.namespace }}
{{- $secretObj := (lookup
"v1"
"Secret"
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name)
| default dict }}
{{- $secretData := (get $secretObj "data") | default dict }}
{{- $secretValue := (get $secretData $v.valueFrom.secretKeyRef.key) | default "" }}
{{- if (and (not $secretValue) (not $v.valueFrom.secretKeyRef.optional)) }}
{{- required (printf "Secret or key missing for %s/%s in namespace: %s"
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
$v.valueFrom.secretKeyRef.namespace)
nil }}
{{- end }}
{{- if $secretValue }}
apiVersion: v1
kind: Secret
metadata:
{{- $secretfullname := printf "%s-%s-%s-%s"
$.root.Release.Name
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
}}
name: {{ printf "%s-%s-%s-%s-%s-%s"
$.root.Release.Name
($v.valueFrom.secretKeyRef.namespace | substr 0 5)
($v.valueFrom.secretKeyRef.name | substr 0 5)
( $v.valueFrom.secretKeyRef.key | substr 0 5)
(sha256sum $secretfullname | substr 0 4)
($.suffix)
| lower | replace "." "" | replace "_" ""
}}
namespace: "{{ $.root.Release.Namespace }}"
labels:
{{- include "yugabyte.labels" $.root | indent 4 }}
type: Opaque # should it be an Opaque secret?
data:
{{ $v.valueFrom.secretKeyRef.key }}: {{ $secretValue | quote }}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}
{{/*
Add env secrets to DB statefulset.
*/}}
{{- define "yugabyte.addenvsecrets" -}}
{{- range $v := .secretenv }}
- name: {{ $v.name }}
valueFrom:
secretKeyRef:
{{- if $v.valueFrom.secretKeyRef.namespace }}
{{- $secretfullname := printf "%s-%s-%s-%s"
$.root.Release.Name
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
}}
name: {{ printf "%s-%s-%s-%s-%s-%s"
$.root.Release.Name
($v.valueFrom.secretKeyRef.namespace | substr 0 5)
($v.valueFrom.secretKeyRef.name | substr 0 5)
($v.valueFrom.secretKeyRef.key | substr 0 5)
(sha256sum $secretfullname | substr 0 4)
($.suffix)
| lower | replace "." "" | replace "_" ""
}}
{{- else }}
name: {{ $v.valueFrom.secretKeyRef.name }}
{{- end }}
key: {{ $v.valueFrom.secretKeyRef.key }}
optional: {{ $v.valueFrom.secretKeyRef.optional | default "false" }}
{{- end }}
{{- end }}
{{/*
Create Volume name.
*/}}
@ -167,21 +84,18 @@ Generate a preflight check script invocation.
*/}}
{{- define "yugabyte.preflight_check" -}}
{{- if not .Values.preflight.skipAll -}}
{{- $port := .Preflight.Port -}}
{{- range $addr := split "," .Preflight.Addr -}}
if [ -f /home/yugabyte/tools/k8s_preflight.py ]; then
PYTHONUNBUFFERED="true" /home/yugabyte/tools/k8s_preflight.py \
dnscheck \
--addr="{{ $addr }}" \
{{- if not $.Values.preflight.skipBind }}
--port="{{ $port }}"
--addr="{{ .Preflight.Addr }}" \
{{- if not .Values.preflight.skipBind }}
--port="{{ .Preflight.Port }}"
{{- else }}
--skip_bind
{{- end }}
fi && \
{{ end }}
{{- end }}
{{- end }}
{{- end -}}
{{- end -}}
{{/*
Get YugaByte fs data directories.
@ -216,20 +130,12 @@ echo "disk check at: $(date)" \
Generate server FQDN.
*/}}
{{- define "yugabyte.server_fqdn" -}}
{{- if .Values.multicluster.createServicePerPod -}}
{{- if (and .Values.istioCompatibility.enabled .Values.multicluster.createServicePerPod) -}}
{{- printf "$(HOSTNAME).$(NAMESPACE).svc.%s" .Values.domainName -}}
{{- else if (and .Values.oldNamingStyle .Values.multicluster.createServiceExports) -}}
{{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }}
{{- printf "$(HOSTNAME).%s.%s.$(NAMESPACE).svc.clusterset.local" $membershipName .Service.name -}}
{{- else if .Values.oldNamingStyle -}}
{{- printf "$(HOSTNAME).%s.$(NAMESPACE).svc.%s" .Service.name .Values.domainName -}}
{{- else -}}
{{- if .Values.multicluster.createServiceExports -}}
{{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }}
{{- printf "$(HOSTNAME).%s.%s-%s.$(NAMESPACE).svc.clusterset.local" $membershipName (include "yugabyte.fullname" .) .Service.name -}}
{{- else -}}
{{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}}
{{- end -}}
{{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}}
{{- end -}}
{{- end -}}
@ -242,25 +148,10 @@ Generate server broadcast address.
{{/*
Generate server RPC bind address.
In case of multi-cluster services (MCS), we set it to $(POD_IP) to
ensure YCQL uses a resolvable address.
See https://github.com/yugabyte/yugabyte-db/issues/16155
We use a workaround for above in case of Istio by setting it to
$(POD_IP) and localhost. Master doesn't support that combination, so
we stick to 0.0.0.0, which works for master.
*/}}
{{- define "yugabyte.rpc_bind_address" -}}
{{- $port := index .Service.ports "tcp-rpc-port" -}}
{{- if .Values.istioCompatibility.enabled -}}
{{- if (eq .Service.name "yb-masters") -}}
0.0.0.0:{{ $port }}
{{- else -}}
$(POD_IP):{{ $port }},127.0.0.1:{{ $port }}
{{- end -}}
{{- else if (or .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod) -}}
$(POD_IP):{{ $port }}
0.0.0.0:{{ index .Service.ports "tcp-rpc-port" -}}
{{- else -}}
{{- include "yugabyte.server_fqdn" . -}}
{{- end -}}
@ -277,7 +168,7 @@ Generate server web interface.
Generate server CQL proxy bind address.
*/}}
{{- define "yugabyte.cql_proxy_bind_address" -}}
{{- if or .Values.istioCompatibility.enabled .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod -}}
{{- if .Values.istioCompatibility.enabled -}}
0.0.0.0:{{ index .Service.ports "tcp-yql-port" -}}
{{- else -}}
{{- include "yugabyte.server_fqdn" . -}}
@ -322,10 +213,10 @@ Compute the maximum number of unavailable pods based on the number of master rep
Set consistent issuer name.
*/}}
{{- define "yugabyte.tls_cm_issuer" -}}
{{- if .Values.tls.certManager.bootstrapSelfsigned -}}
{{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }}
{{- if .Values.tls.certManager.useClusterIssuer -}}
{{ .Values.tls.certManager.clusterIssuer }}
{{- else -}}
{{ .Values.tls.certManager.useClusterIssuer | ternary .Values.tls.certManager.clusterIssuer .Values.tls.certManager.issuer}}
{{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }}
{{- end -}}
{{- end -}}
@ -365,51 +256,3 @@ Set consistent issuer name.
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Default nodeAffinity for multi-az deployments
*/}}
{{- define "yugabyte.multiAZNodeAffinity" -}}
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
- {{ quote .Values.AZ }}
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- {{ quote .Values.AZ }}
{{- end -}}
{{/*
Default podAntiAffinity for master and tserver
This requires "appLabelArgs" to be passed in - defined in service.yaml
we have a .root and a .label in appLabelArgs
*/}}
{{- define "yugabyte.podAntiAffinity" -}}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
{{- if .root.Values.oldNamingStyle }}
- key: app
operator: In
values:
- "{{ .label }}"
{{- else }}
- key: app.kubernetes.io/name
operator: In
values:
- "{{ .label }}"
- key: release
operator: In
values:
- {{ .root.Release.Name | quote }}
{{- end }}
topologyKey: kubernetes.io/hostname
{{- end -}}

View File

@ -1,7 +1,7 @@
{{- $root := . -}}
---
{{- if $root.Values.tls.certManager.enabled }}
{{- if $root.Values.tls.certManager.bootstrapSelfsigned }}
{{- if not $root.Values.tls.certManager.useClusterIssuer }}
---
apiVersion: cert-manager.io/v1
kind: Issuer
@ -37,38 +37,13 @@ spec:
ca:
secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }}
---
{{- else }}
{{/* when bootstrapSelfsigned = false, ie. when using an external CA.
Create a Secret with just the rootCA.cert value and mount into master/tserver pods.
This will be used as a fall back in case the Secret generated by cert-manager does not
have a root ca.crt. This can happen for certain certificate issuers like LetsEncrypt.
*/}}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }}
namespace: "{{ $root.Release.Namespace }}"
labels:
{{- include "yugabyte.labels" $root | indent 4 }}
type: Opaque
data:
ca.crt: {{ $root.Values.tls.rootCA.cert }}
---
{{- end }}
{{/*
The below Certificate resource will trigger cert-manager to issue crt/key into Secrets.
These secrets are mounted into master/tserver pods.
*/}}
{{- range .Values.Services }}
{{- $service := . -}}
{{- $appLabelArgs := dict "label" .label "root" $root -}}
{{- $serviceValues := (dict "Service" $service "Values" $root.Values "Chart" $root.Chart "Release" $root.Release) -}}
{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}}
{{- if (gt (int $replicas) 0) }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
@ -90,29 +65,28 @@ spec:
secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }}
duration: {{ $root.Values.tls.certManager.certificates.duration | quote }}
renewBefore: {{ $root.Values.tls.certManager.certificates.renewBefore | quote }}
commonName: yugabyte-{{ .name }}
isCA: false
privateKey:
algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }}
encoding: PKCS8
size: {{ $root.Values.tls.certManager.certificates.keySize }}
rotationPolicy: Always
usages:
- server auth
- client auth
# At least one of a DNS Name, URI, or IP address is required.
dnsNames:
{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}}
{{- range $index := until ( int ( $replicas ) ) }}
{{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }}
- {{$node}}
{{- end }}
- {{ printf "%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
uris: []
ipAddresses: []
---
{{- end }}
{{- end }}
---
apiVersion: cert-manager.io/v1
@ -140,7 +114,6 @@ spec:
algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }}
encoding: PKCS8
size: {{ $root.Values.tls.certManager.certificates.keySize }}
rotationPolicy: Always
usages:
- client auth
dnsNames: []

View File

@ -1,23 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "yugabyte.fullname" . }}-master-hooks
namespace: "{{ .Release.Namespace }}"
data:
{{- range $index := until ( int ( .Values.replicas.master ) ) }}
yb-master-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' "
yb-master-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' "
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "yugabyte.fullname" . }}-tserver-hooks
namespace: "{{ .Release.Namespace }}"
data:
{{- range $index := until ( int ( .Values.replicas.tserver) ) }}
yb-tserver-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' "
yb-tserver-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' "
{{- end }}
---

View File

@ -11,19 +11,11 @@ metadata:
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
{{- include "yugabyte.labels" $ | indent 4 }}
service-type: "non-endpoint"
spec:
ports:
{{- range $label, $port := $server.ports }}
{{- if (eq $label "grpc-ybc-port") }}
{{- if $.Values.ybc.enabled }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- else }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- end}}
selector:
statefulset.kubernetes.io/pod-name: {{ $podName | quote }}

View File

@ -1,21 +0,0 @@
{{- /*
Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export
https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api#exporting-services
*/}}
{{- if .Values.multicluster.createServiceExports }}
apiVersion: {{ .Values.multicluster.mcsApiVersion }}
kind: ServiceExport
metadata:
name: {{ .Values.oldNamingStyle | ternary "yb-masters" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-masters") | quote }}
namespace: "{{ .Release.Namespace }}"
labels:
{{- include "yugabyte.labels" . | indent 4 }}
---
apiVersion: {{ .Values.multicluster.mcsApiVersion }}
kind: ServiceExport
metadata:
name: {{ .Values.oldNamingStyle | ternary "yb-tservers" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-tservers") | quote }}
namespace: "{{ .Release.Namespace }}"
labels:
{{- include "yugabyte.labels" . | indent 4 }}
{{ end -}}

View File

@ -1,7 +0,0 @@
{{- $root := . -}}
--- # Create secrets from other namespaces for masters.
{{- $data := dict "secretenv" $.Values.master.secretEnv "root" . "suffix" "master"}}
{{- include "yugabyte.envsecrets" $data }}
--- # Create secrets from other namespaces for tservers.
{{- $data := dict "secretenv" $.Values.tserver.secretEnv "root" . "suffix" "tserver" }}
{{- include "yugabyte.envsecrets" $data }}

View File

@ -24,7 +24,7 @@ data:
{{- end }}
---
{{- end }}
---
{{- range .Values.Services }}
{{- $service := . -}}
{{- $appLabelArgs := dict "label" .label "root" $root -}}
@ -46,29 +46,12 @@ data:
{{- range $index := until ( int ( $replicas ) ) }}
{{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
{{- if $root.Values.multicluster.createServiceExports -}}
{{- $nodeOldStyle = printf "%s-%d.%s.%s.%s.svc.clusterset.local" $service.label $index $root.Values.multicluster.kubernetesClusterId $service.name $root.Release.Namespace }}
{{- $nodeNewStyle = printf "%s-%s-%d.%s.%s-%s.%s.svc.clusterset.local" (include "yugabyte.fullname" $root) $service.label $index $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }}
{{- end -}}
{{- if $root.Values.multicluster.createServicePerPod -}}
{{- $nodeOldStyle = printf "%s-%d.%s.svc.%s" $service.label $index $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle = printf "%s-%s-%d.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index $root.Release.Namespace $root.Values.domainName }}
{{- end -}}
{{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }}
{{- if $root.Values.tls.rootCA.key }}
{{- $dns1 := printf "*.%s-%s.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }}
{{- $dns1 := printf "*.*.%s" $root.Release.Namespace }}
{{- $dns2 := printf "%s.svc.%s" $dns1 $root.Values.domainName }}
{{- if $root.Values.multicluster.createServiceExports -}}
{{- $dns1 = printf "*.%s.%s-%s.%s.svc.clusterset.local" $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }}
{{- end -}}
{{- if $root.Values.multicluster.createServicePerPod -}}
{{- $dns1 = printf "*.%s.svc.%s" $root.Release.Namespace $root.Values.domainName }}
{{- end -}}
{{- $rootCA := buildCustomCert $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key -}}
{{- $server := genSignedCert $node ( default nil ) (list $node $dns1 $dns2 ) 3650 $rootCA }}
{{- $server := genSignedCert $node ( default nil ) (list $dns1 $dns2 ) 3650 $rootCA }}
node.{{$node}}.crt: {{ $server.Cert | b64enc }}
node.{{$node}}.key: {{ $server.Key | b64enc }}
{{- else }}
@ -92,20 +75,13 @@ spec:
clusterIP: None
ports:
{{- range $label, $port := .ports }}
{{- if (eq $label "grpc-ybc-port") }}
{{- if $root.Values.ybc.enabled }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- else }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- end}}
selector:
{{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }}
{{- if $root.Values.enableLoadBalancer }}
{{ if $root.Values.enableLoadBalancer }}
{{- range $endpoint := $root.Values.serviceEndpoints }}
{{- if eq $service.label $endpoint.app }}
---
@ -118,12 +94,11 @@ metadata:
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
{{- include "yugabyte.labels" $root | indent 4 }}
service-type: "endpoint"
spec:
{{- if eq $root.Release.Service "Tiller" }}
{{ if eq $root.Release.Service "Tiller" }}
clusterIP:
{{- else }}
{{- if $endpoint.clusterIP }}
{{ else }}
{{ if $endpoint.clusterIP }}
clusterIP: {{ $endpoint.clusterIP }}
{{- end }}
{{- end }}
@ -141,7 +116,7 @@ spec:
{{- end }}
{{- end}}
{{- end}}
{{- end}}
{{ end }}
---
apiVersion: apps/v1
@ -222,9 +197,6 @@ spec:
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 8 }}
{{- include "yugabyte.labels" $root | indent 8 }}
{{- if $root.Values.istioCompatibility.enabled }}
sidecar.istio.io/inject: "true"
{{- end }}
{{- if eq .name "yb-masters" }}
{{- with $root.Values.master.podLabels }}{{ toYaml . | nindent 8 }}{{ end }}
{{- else }}
@ -242,95 +214,62 @@ spec:
nodeSelector:
{{ toYaml $root.Values.nodeSelector | indent 8 }}
{{- end }}
terminationGracePeriodSeconds: 300
{{- if eq .name "yb-masters" }} # yb-masters
{{- with $root.Values.master.serviceAccount }}
serviceAccountName: {{ . }}
{{- end }}
{{- if $root.Values.master.tolerations }}
tolerations:
{{- with $root.Values.master.tolerations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
{{- else }} # yb-tservers
{{- with $root.Values.tserver.serviceAccount }}
serviceAccountName: {{ . }}
{{- end }}
{{- if $root.Values.tserver.tolerations }}
tolerations:
{{- with $root.Values.tserver.tolerations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
{{- end }}
terminationGracePeriodSeconds: 300
affinity:
# Set the anti-affinity selector scope to YB masters and tservers.
{{- $nodeAffinityData := dict}}
{{- if eq .name "yb-masters" -}}
{{- $nodeAffinityData = get $root.Values.master.affinity "nodeAffinity" | default (dict) -}}
{{- else -}}
{{- $nodeAffinityData = get $root.Values.tserver.affinity "nodeAffinity" | default (dict) -}}
{{- end -}}
# Set the anti-affinity selector scope to YB masters.
{{ if $root.Values.AZ }}
{{- $userSelectorTerms := dig "requiredDuringSchedulingIgnoredDuringExecution" "nodeSelectorTerms" "" $nodeAffinityData | default (list) -}}
{{- $baseAffinity := include "yugabyte.multiAZNodeAffinity" $root | fromYaml -}}
{{- $requiredSchedule := (list) -}}
{{- if $userSelectorTerms -}}
{{- range $userSelectorTerms -}}
{{- $userTerm := . -}}
{{- range $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}}
{{- $matchExpr := concat .matchExpressions $userTerm.matchExpressions | dict "matchExpressions" -}}
{{- $requiredSchedule = mustMerge $matchExpr $userTerm | append $requiredSchedule -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- $requiredSchedule = $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}}
{{- end -}}
{{- with $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution -}}
{{- $_ := set . "nodeSelectorTerms" $requiredSchedule -}}
{{- end -}}
{{- $nodeAffinityData = mustMerge $baseAffinity $nodeAffinityData -}}
{{- end -}}
{{- $podAntiAffinityData := dict -}}
{{- $basePodAntiAffinity := include "yugabyte.podAntiAffinity" ($appLabelArgs) | fromYaml -}}
{{- if eq .name "yb-masters" -}}
{{- with $root.Values.master.affinity -}}
{{- $userPodAntiAffinity := get . "podAntiAffinity" | default (dict) -}}
{{- if $userPodAntiAffinity -}}
{{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}}
{{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}}
{{- end -}}
{{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}}
{{- end -}}
{{- else -}}
{{- with $root.Values.tserver.affinity -}}
{{- $userPodAntiAffinity := get . "podAntiAffinity" | default (dict) -}}
{{- if $userPodAntiAffinity -}}
{{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}}
{{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}}
{{- end -}}
{{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}}
{{- end -}}
{{- end -}}
{{- if eq .name "yb-masters" -}}
{{- if $nodeAffinityData -}}
{{- $_ := set $root.Values.master.affinity "nodeAffinity" $nodeAffinityData -}}
{{- end -}}
{{- $_ := set $root.Values.master.affinity "podAntiAffinity" $podAntiAffinityData -}}
{{ toYaml $root.Values.master.affinity | nindent 8 }}
{{- else -}}
{{- if $nodeAffinityData -}}
{{- $_ := set $root.Values.tserver.affinity "nodeAffinity" $nodeAffinityData -}}
{{- end -}}
{{- $_ := set $root.Values.tserver.affinity "podAntiAffinity" $podAntiAffinityData -}}
{{ toYaml $root.Values.tserver.affinity | nindent 8 }}
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
- {{ $root.Values.AZ }}
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- {{ $root.Values.AZ }}
{{ end }}
{{- with $root.Values.dnsConfig }}
dnsConfig: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with $root.Values.dnsPolicy }}
dnsPolicy: {{ . | quote }}
{{- end }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
{{- if $root.Values.oldNamingStyle }}
- key: app
operator: In
values:
- "{{ .label }}"
{{- else }}
- key: app.kubernetes.io/name
operator: In
values:
- "{{ .label }}"
- key: release
operator: In
values:
- {{ $root.Release.Name | quote }}
{{- end }}
topologyKey: kubernetes.io/hostname
{{- if eq .name "yb-masters" }}
{{- with $root.Values.master.affinity }}{{ toYaml . | nindent 8 }}{{ end }}
{{- else }}
{{- with $root.Values.tserver.affinity }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
containers:
- name: "{{ .label }}"
image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}"
@ -382,20 +321,18 @@ spec:
- name: YBDEVOPS_CORECOPY_DIR
value: "/mnt/disk0/cores"
{{- if eq .name "yb-masters" }}
{{- with $root.Values.master.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }}
{{- $data := dict "secretenv" $root.Values.master.secretEnv "root" $root "suffix" "master"}}
{{- include "yugabyte.addenvsecrets" $data | nindent 8 }}
{{- with $root.Values.master.extraEnv }}{{ toYaml . | nindent 8 }}{{ end }}
{{- with $root.Values.master.secretEnv }}{{ toYaml . | nindent 8 }}{{ end }}
{{- else }}
{{- with $root.Values.tserver.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }}
{{- $data := dict "secretenv" $root.Values.tserver.secretEnv "root" $root "suffix" "tserver" }}
{{- include "yugabyte.addenvsecrets" $data | nindent 8 }}
{{- with $root.Values.tserver.extraEnv }}{{ toYaml . | nindent 8 }}{{ end }}
{{- with $root.Values.tserver.secretEnv }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
{{- if and $root.Values.tls.enabled $root.Values.tls.clientToServer (ne .name "yb-masters") }}
- name: SSL_CERTFILE
value: /root/.yugabytedb/root.crt
{{- end }}
resources:
{{- if eq .name "yb-masters" }}
{{ if eq .name "yb-masters" }}
{{ toYaml $root.Values.resource.master | indent 10 }}
{{ else }}
{{ toYaml $root.Values.resource.tserver | indent 10 }}
@ -426,13 +363,10 @@ spec:
{{- $rpcPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $rpcDict) -}}
{{- if $rpcPreflight -}}{{ $rpcPreflight | nindent 12 }}{{ end -}}
{{- $broadcastAddr := include "yugabyte.server_broadcast_address" $serviceValues -}}
{{/* skip bind check for servicePerPod multi-cluster, we cannot/don't bind to service IP */}}
{{- if not $root.Values.multicluster.createServicePerPod }}
{{- $broadcastPort := index $service.ports "tcp-rpc-port" -}}
{{- $broadcastDict := dict "Addr" $broadcastAddr "Port" $broadcastPort -}}
{{- $broadcastPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $broadcastDict) -}}
{{- if $broadcastPreflight -}}{{ $broadcastPreflight | nindent 12 }}{{ end -}}
{{- end }}
{{- $broadcastPort := index $service.ports "tcp-rpc-port" -}}
{{- $broadcastDict := dict "Addr" $broadcastAddr "Port" $broadcastPort -}}
{{- $broadcastPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $broadcastDict) -}}
{{- if $broadcastPreflight -}}{{ $broadcastPreflight | nindent 12 }}{{ end -}}
{{- $webserverAddr := include "yugabyte.webserver_interface" $serviceValues -}}
{{- $webserverPort := index $service.ports "http-ui" -}}
{{- $webserverDict := dict "Addr" $webserverAddr "Port" $webserverPort -}}
@ -443,25 +377,6 @@ spec:
else
k8s_parent=""
fi && \
{{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }}
echo "Creating ephemeral /opt/certs/yugabyte/ as symlink to persisted /mnt/disk0/certs/" && \
mkdir -p /mnt/disk0/certs && \
mkdir -p /opt/certs && \
ln -s /mnt/disk0/certs /opt/certs/yugabyte && \
if [[ ! -f /opt/certs/yugabyte/ca.crt ]]; then
echo "Fresh install of /opt/certs/yugabyte/ca.crt"
cp /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt;
fi && \
cmp -s /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt;sameRootCA=$? && \
if [[ $sameRootCA -eq 0 ]]; then
echo "Refreshing tls certs at /opt/certs/yugabyte/";
cp /home/yugabyte/cert-manager/tls.crt /opt/certs/yugabyte/node.{{$rpcAddr}}.crt;
cp /home/yugabyte/cert-manager/tls.key /opt/certs/yugabyte/node.{{$rpcAddr}}.key;
chmod 600 /opt/certs/yugabyte/*
else
echo "WARNING: Not refreshing certificates as the root ca.crt has changed"
fi && \
{{- end }}
{{- if eq .name "yb-masters" }}
exec ${k8s_parent} /home/yugabyte/bin/yb-master \
{{- if not $root.Values.storage.ephemeral }}
@ -565,18 +480,10 @@ spec:
{{- end }}
ports:
{{- range $label, $port := .ports }}
{{- if not (eq $label "grpc-ybc-port") }}
- containerPort: {{ $port }}
name: {{ $label | quote }}
{{- end }}
{{- end}}
volumeMounts:
{{- if (eq .name "yb-tservers") }}
- name: tserver-tmp
mountPath: /tmp
{{- end }}
- name: debug-hooks-volume
mountPath: /opt/debug_hooks_config
{{ if not $root.Values.storage.ephemeral }}
{{- range $index := until (int ($storageInfo.count)) }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }}
@ -585,7 +492,7 @@ spec:
{{- end }}
{{- if $root.Values.tls.enabled }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }}
mountPath: /opt/certs/yugabyte
readOnly: true
- name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
mountPath: /root/.yugabytedb/
@ -624,86 +531,9 @@ spec:
- name: {{ $root.Values.oldNamingStyle | ternary "datadir0" (printf "%s0" (include "yugabyte.volume_name" $root)) }}
mountPath: /var/yugabyte/cores
subPath: cores
{{- if $root.Values.ybCleanup.resources }}
resources: {{ toYaml $root.Values.ybCleanup.resources | nindent 10 }}
{{- end }}
{{- end }}
{{- if and (eq .name "yb-tservers") ($root.Values.ybc.enabled) }}
- name: yb-controller
image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}"
imagePullPolicy: {{ $root.Values.Image.pullPolicy }}
lifecycle:
postStart:
exec:
command:
- "bash"
- "-c"
- >
mkdir -p /mnt/disk0/yw-data/controller/tmp;
mkdir -p /mnt/disk0/yw-data/controller/conf;
mkdir -p /mnt/disk0/ybc-data/controller/logs;
mkdir -p /tmp/yugabyte/controller;
ln -sf /mnt/disk0/ybc-data/controller/logs /tmp/yugabyte/controller;
ln -sf /mnt/disk0/yw-data/controller/bin /tmp/yugabyte/controller;
rm -f /tmp/yugabyte/controller/yb-controller.pid;
{{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }}
mkdir -p /opt/certs;
ln -sf /mnt/disk0/certs /opt/certs/yugabyte;
{{- end }}
command:
- "/sbin/tini"
- "--"
args:
- "/bin/bash"
- "-c"
- >
while true; do
sleep 60;
/home/yugabyte/tools/k8s_ybc_parent.py status || /home/yugabyte/tools/k8s_ybc_parent.py start;
done
{{- with index $service.ports "grpc-ybc-port" }}
ports:
- containerPort: {{ . }}
name: "grpc-ybc-port"
{{- end }}
volumeMounts:
- name: tserver-tmp
mountPath: /tmp
{{- if not $root.Values.storage.ephemeral }}
{{- range $index := until (int ($storageInfo.count)) }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }}
mountPath: /mnt/disk{{ $index }}
{{- end }}
{{- end }}
{{- if $root.Values.tls.enabled }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }}
readOnly: true
{{- end }}
{{- if ($root.Values.tserver.extraVolumeMounts) -}}
{{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}}
{{- $root.Values.tserver.extraVolumeMounts | toYaml | nindent 10 -}}
{{- end -}}
{{- if $root.Values.ybc.resources }}
resources: {{ toYaml $root.Values.ybc.resources | nindent 10 }}
{{- end }}
{{- end}}
volumes:
{{- if (eq .name "yb-masters") }}
- name: debug-hooks-volume
configMap:
name: {{ include "yugabyte.fullname" $root }}-master-hooks
defaultMode: 0755
{{- else if (eq .name "yb-tservers") }}
- name: debug-hooks-volume
configMap:
name: {{ include "yugabyte.fullname" $root }}-tserver-hooks
defaultMode: 0755
- name: tserver-tmp
emptyDir: {}
{{- end }}
{{ if not $root.Values.storage.ephemeral }}
{{- range $index := until (int ($storageInfo.count)) }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }}
@ -712,24 +542,25 @@ spec:
{{- end }}
{{- end }}
{{- if $root.Values.tls.enabled }}
{{- if $root.Values.tls.certManager.enabled }}
{{- /* certManager enabled */}}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
projected:
sources:
{{- if not $root.Values.tls.certManager.bootstrapSelfsigned }}
- secret:
name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }}
{{- end }}
- secret:
name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
{{- else }}
{{/* certManager disabled */}}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
secret:
secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
{{- if $root.Values.tls.certManager.enabled }}
items:
{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}}
{{- range $index := until ( int ( $replicas ) ) }}
{{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }}
- key: tls.crt
path: node.{{$node}}.crt
- key: tls.key
path: node.{{$node}}.key
{{- end }}
- key: ca.crt
path: ca.crt
{{- end }}
defaultMode: 256
{{- end }}
- name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
secret:
secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}

@ -2,15 +2,10 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
Component: "yugabytedb"
fullnameOverride: ""
nameOverride: ""
Image:
repository: "yugabytedb/yugabyte"
tag: 2.18.7.0-b30
tag: 2.14.16.0-b17
pullPolicy: IfNotPresent
pullSecretName: ""
storage:
ephemeral: false # will not allocate PVs when true
@ -26,38 +21,27 @@ storage:
resource:
master:
requests:
cpu: "2"
cpu: 2
memory: 2Gi
limits:
cpu: "2"
cpu: 2
memory: 2Gi
tserver:
requests:
cpu: "2"
cpu: 2
memory: 4Gi
limits:
cpu: "2"
cpu: 2
memory: 4Gi
replicas:
master: 3
tserver: 3
## Used to set replication factor when isMultiAz is set to true
totalMasters: 3
partition:
master: 0
tserver: 0
# Used in Multi-AZ setup
masterAddresses: ""
isMultiAz: false
AZ: ""
# Disable the YSQL
disableYsql: false
tls:
# Set to true to enable the TLS.
enabled: false
@ -68,33 +52,25 @@ tls:
# Set enabled to true to use cert-manager instead of providing your own rootCA
certManager:
enabled: false
# Will create own ca certificate and issuer when set to true
bootstrapSelfsigned: true
# Use ClusterIssuer when set to true, otherwise use Issuer
# Will create own ca certificate and issuer when set to false
useClusterIssuer: false
# Name of ClusterIssuer to use when useClusterIssuer is true
# ignored when useClusterIssuer is false
clusterIssuer: cluster-ca
# Name of Issuer to use when useClusterIssuer is false
issuer: yugabyte-ca
certificates:
# The lifetime before cert-manager will issue a new certificate.
# The re-issued certificates will not be automatically reloaded by the service.
# It is necessary to provide some external means of restarting the pods.
duration: 2160h # 90d
renewBefore: 360h # 15d
algorithm: RSA # ECDSA or RSA
# Can be 2048, 4096 or 8192 for RSA
algorithm: ECDSA # ECDSA or RSA
# Can be 2048, 4096 or 8192 for RSA
# Or 256, 384 or 521 for ECDSA
keySize: 2048
keySize: 521
## When certManager.enabled=false, rootCA.cert and rootCA.key are used to generate TLS certs.
## When certManager.enabled=true and boostrapSelfsigned=true, rootCA is ignored.
## When certManager.enabled=true and bootstrapSelfsigned=false, only rootCA.cert is used
## to verify TLS certs generated and signed by the external provider.
# Will be ignored when certManager.enabled=true
rootCA:
cert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFXTVJRd0VnWURWUVFERXd0WmRXZGgKWW5sMFpTQkVRakFlRncweE9UQXlNRGd3TURRd01qSmFGdzB5T1RBeU1EVXdNRFF3TWpKYU1CWXhGREFTQmdOVgpCQU1UQzFsMVoyRmllWFJsSUVSQ01JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCnVOMWF1aWc4b2pVMHM0OXF3QXhrT2FCaHkwcTlyaVg2akVyZWJyTHJOWDJOeHdWQmNVcWJkUlhVc3VZNS96RUQKUC9CZTNkcTFuMm9EQ2ZGVEwweGkyNFdNZExRcnJBMndCdzFtNHM1WmQzcEJ1U04yWHJkVVhkeUx6dUxlczJNbgovckJxcWRscXp6LzAyTk9TOE9SVFZCUVRTQTBSOFNMQ1RjSGxMQmRkMmdxZ1ZmemVXRlVObXhWQ2EwcHA5UENuCmpUamJJRzhJWkh5dnBkTyt3aURQM1Y1a1ZEaTkvbEtUaGUzcTFOeDg5VUNFcnRJa1pjSkYvWEs3aE90MU1sOXMKWDYzb2lVMTE1Q2svbGFGRjR6dWgrZk9VenpOVXRXeTc2RE92cm5pVGlaU0tQZDBBODNNa2l2N2VHaDVkV3owWgpsKzJ2a3dkZHJaRzVlaHhvbGhGS3pRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQXFRd0hRWURWUjBsCkJCWXdGQVlJS3dZQkJRVUhBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFEQjVRbmlYd1ptdk52eG5VbS9sTTVFbms3VmhTUzRUZldIMHY4Q0srZWZMSVBTbwpVTkdLNXU5UzNEUWlvaU9SN1Vmc2YrRnk1QXljMmNUY1M2UXBxTCt0V1QrU1VITXNJNk9oQ05pQ1gvQjNKWERPCkd2R0RIQzBVOHo3aWJTcW5zQ2Rid05kajAyM0lwMHVqNE9DVHJ3azZjd0RBeXlwVWkwN2tkd28xYWJIWExqTnAKamVQMkwrY0hkc2dKM1N4WWpkK1kvei9IdmFrZG1RZDJTL1l2V0R3aU1SRDkrYmZXWkJVRHo3Y0QyQkxEVmU0aAp1bkFaK3NyelR2Sjd5dkVodzlHSDFyajd4Qm9VNjB5SUUrYSszK2xWSEs4WnBSV0NXMnh2eWNrYXJSKytPS2NKClFsL04wWExqNWJRUDVoUzdhOTdhQktTamNqY3E5VzNGcnhJa2tKST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
key: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdU4xYXVpZzhvalUwczQ5cXdBeGtPYUJoeTBxOXJpWDZqRXJlYnJMck5YMk54d1ZCCmNVcWJkUlhVc3VZNS96RURQL0JlM2RxMW4yb0RDZkZUTDB4aTI0V01kTFFyckEyd0J3MW00czVaZDNwQnVTTjIKWHJkVVhkeUx6dUxlczJNbi9yQnFxZGxxenovMDJOT1M4T1JUVkJRVFNBMFI4U0xDVGNIbExCZGQyZ3FnVmZ6ZQpXRlVObXhWQ2EwcHA5UENualRqYklHOElaSHl2cGRPK3dpRFAzVjVrVkRpOS9sS1RoZTNxMU54ODlVQ0VydElrClpjSkYvWEs3aE90MU1sOXNYNjNvaVUxMTVDay9sYUZGNHp1aCtmT1V6ek5VdFd5NzZET3ZybmlUaVpTS1BkMEEKODNNa2l2N2VHaDVkV3owWmwrMnZrd2RkclpHNWVoeG9saEZLelFJREFRQUJBb0lCQUJsdW1tU3gxR1djWER1Mwpwei8wZEhWWkV4c2NsU3U0SGRmZkZPcTF3cFlCUjlmeGFTZGsxQzR2YXF1UjhMaWl6WWVtVWViRGgraitkSnlSCmpwZ2JNaDV4S1BtRkw5empwU3ZUTkN4UHB3OUF5bm5sM3dyNHZhcU1CTS9aZGpuSGttRC9kQzBadEEvL0JIZ3YKNHk4d3VpWCsvUWdVaER0Z1JNcmR1ZUZ1OVlKaFo5UE9jYXkzSkkzMFhEYjdJSS9vNFNhYnhTcFI3bTg5WjY0NwpUb3hsOEhTSzl0SUQxbkl1bHVpTmx1dHI1RzdDdE93WTBSc2N5dmZ2elg4a1d2akpLZVJVbmhMSCtXVFZOaExICjdZc0tMNmlLa1NkckMzeWVPWnV4R0pEbVdrZVgxTzNPRUVGYkc4TjVEaGNqL0lXbDh1dGt3LzYwTEthNHBCS2cKTXhtNEx3RUNnWUVBNnlPRkhNY2pncHYxLzlHZC8yb3c2YmZKcTFjM1dqQkV2cnM2ZXNyMzgrU3UvdVFneXJNcAo5V01oZElpb2dYZjVlNjV5ZlIzYVBXcjJJdWMxZ0RUNlYycDZFR2h0NysyQkF1YkIzczloZisycVNRY1lkS3pmCnJOTDdKalE4ZEVGZWdYd041cHhKOTRTTVFZNEI4Qm9hOHNJWTd3TzU4dHpVMjZoclVnanFXQ1VDZ1lFQXlVUUIKNzViWlh6MGJ5cEc5NjNwYVp0bGlJY0cvUk1XMnVPOE9rVFNYSGdDSjBob25uRm5IMGZOc1pGTHdFWEtnTTRORworU3ZNbWtUekE5eVVSMHpIMFJ4UW44L1YzVWZLT2k5RktFeWx6NzNiRkV6ZW1QSEppQm12NWQ4ZTlOenZmU0E0CkdpRTYrYnFyV3VVWWRoRWlYTnY1SFNPZ3I4bUx1TzJDbGlmNTg0a0NnWUFlZzlDTmlJWmlOODAzOHNNWFYzZWIKalI5ZDNnYXY3SjJ2UnVyeTdvNDVGNDlpUXNiQ3AzZWxnY1RnczY5eWhkaFpwYXp6OGNEVndhREpyTW16cHF4cQpWY1liaFFIblppSWM5MGRubS9BaVF2eWJWNUZqNnQ5b05VVWtreGpaV1haalJXOGtZMW55QmtDUmJWVnhER0k4CjZOV0ZoeTFGaUVVVGNJcms3WVZFQlFLQmdRREpHTVIrYWRFamtlRlUwNjVadkZUYmN0VFVPY3dzb1Foalc2akkKZVMyTThxakNYeE80NnhQMnVTeFNTWFJKV3FpckQ3NDRkUVRvRjRCaEdXS21veGI3M3pqSGxWaHcwcXhDMnJ4VQorZENxODE0VXVJR3BlOTBMdWU3QTFlRU9kRHB1WVdUczVzc1FmdTE3MG5CUWQrcEhzaHNFZkhhdmJjZkhyTGpQCjQzMmhVUUtCZ1FDZ3hMZG5Pd2JMaHZLVkhhdTdPVXQxbGpUT240SnB5bHpnb3hFRXpzaDhDK0ZKUUQ1bkFxZXEKZUpWSkNCd2VkallBSDR6MUV3cHJjWnJIN3IyUTBqT2ZFallwU1dkZGxXaWh4OTNYODZ0aG83UzJuUlYrN1hNcQpPVW9ZcVZ1WGlGMWdMM1NGeHZqMHhxV3l0d0NPTW5DZGFCb0M0Tkw3enJtL0lZOEUwSkw2MkE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo="
## When tls.certManager.enabled=false
## nodeCert and clientCert will be used only when rootCA.key is empty.
## Will be ignored and genSignedCert will be used to generate
## node and client certs if rootCA.key is provided.
@ -109,58 +85,33 @@ tls:
gflags:
master:
default_memory_limit_to_ram_ratio: 0.85
tserver: {}
# tserver:
# use_cassandra_authentication: false
PodManagementPolicy: Parallel
enableLoadBalancer: true
ybc:
enabled: false
## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container
## Use the above link to learn more about Kubernetes resources configuration.
# resources:
# requests:
# cpu: "1"
# memory: 1Gi
# limits:
# cpu: "1"
# memory: 1Gi
ybCleanup: {}
## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container
## Use the above link to learn more about Kubernetes resources configuration.
# resources:
# requests:
# cpu: "1"
# memory: 1Gi
# limits:
# cpu: "1"
# memory: 1Gi
isMultiAz: false
domainName: "cluster.local"
serviceEndpoints:
- name: "yb-master-ui"
type: LoadBalancer
annotations: {}
clusterIP: ""
## Sets the Service's externalTrafficPolicy
externalTrafficPolicy: ""
# externalTrafficPolicy: ""
app: "yb-master"
loadBalancerIP: ""
# loadBalancerIP: ""
ports:
http-ui: "7000"
- name: "yb-tserver-service"
type: LoadBalancer
annotations: {}
clusterIP: ""
## Sets the Service's externalTrafficPolicy
externalTrafficPolicy: ""
# externalTrafficPolicy: ""
app: "yb-tserver"
loadBalancerIP: ""
# loadBalancerIP: ""
ports:
tcp-yql-port: "9042"
tcp-yedis-port: "6379"
@ -187,11 +138,8 @@ Services:
http-ycql-met: "12000"
http-yedis-met: "11000"
http-ysql-met: "13000"
grpc-ybc-port: "18018"
## Should be set to true only if Istio is being used. This also adds
## the Istio sidecar injection labels to the pods.
## Should be set to true only if Istio is being used.
## TODO: remove this once
## https://github.com/yugabyte/yugabyte-db/issues/5641 is fixed.
##
@ -208,22 +156,6 @@ multicluster:
## failover. Useful when using new naming style.
createCommonTserverService: false
## Enable it to deploy YugabyteDB in a multi-cluster services enabled
## Kubernetes cluster (KEP-1645). This will create ServiceExport.
## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export
## You can use this gist for the reference to deploy the YugabyteDB in a multi-cluster scenario.
## Gist - https://gist.github.com/baba230896/78cc9bb6f4ba0b3d0e611cd49ed201bf
createServiceExports: false
## Mandatory variable when createServiceExports is set to true.
## Use: In case of GKE, you need to pass GKE Hub Membership Name.
## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#enabling
kubernetesClusterId: ""
## mcsApiVersion is used for the MCS resources created by the
## chart. Set to net.gke.io/v1 when using GKE MCS.
mcsApiVersion: "multicluster.x-k8s.io/v1alpha1"
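Purely as an illustration of the multi-cluster options documented above (these keys belong to the chart version being replaced in this diff; the release name, repo alias, and membership name below are placeholders):

```
# Hypothetical invocation enabling MCS ServiceExport creation on a GKE fleet member.
helm upgrade --install yb-demo yugabytedb/yugabyte \
  --set multicluster.createServiceExports=true \
  --set multicluster.kubernetesClusterId=my-gke-hub-membership \
  --set multicluster.mcsApiVersion=net.gke.io/v1
```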
serviceMonitor:
## If true, two ServiceMonitor CRs are created. One for yb-master
## and one for yb-tserver
@ -299,37 +231,9 @@ affinity: {}
statefulSetAnnotations: {}
networkAnnotation: {}
commonLabels: {}
## @param dnsPolicy DNS Policy for pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsPolicy: ClusterFirst
dnsPolicy: ""
## @param dnsConfig DNS Configuration pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsConfig:
## options:
## - name: ndots
## value: "4"
dnsConfig: {}
master:
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core
## This might override the default affinity from service.yaml
# To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernetes
# has. Each new node selector term is ORed together, and each match expression or match field in
# a single selector is ANDed together.
# This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value
# 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity
# terms.
#
# Pod anti affinity is a simpler merge. Each term is applied separately, and the weight is tracked.
# The pod that achieves the highest weight is selected.
## Example.
# affinity:
# podAntiAffinity:
@ -341,8 +245,6 @@ master:
# values:
# - "yb-master"
# topologyKey: kubernetes.io/hostname
#
# For further examples, see examples/yugabyte/affinity_overrides.yaml
affinity: {}
## Extra environment variables passed to the Master pods.
@ -399,23 +301,10 @@ master:
# mountPath: /home/yugabyte/nfs-backup
extraVolumeMounts: []
## Set service account for master DB pods. The service account
## should exist in the namespace where the master DB pods are brought up.
serviceAccount: ""
tserver:
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core
## This might override the default affinity from service.yaml
# To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernetes
# has. Each new node selector term is ORed together, and each match expression or match field in
# a single selector is ANDed together.
# This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value
# 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity
# terms.
#
# Pod anti affinity is a simpler merge. Each term is applied separately, and the weight is tracked.
# The pod that achieves the highest weight is selected.
## Example.
# affinity:
# podAntiAffinity:
@ -427,7 +316,6 @@ tserver:
# values:
# - "yb-tserver"
# topologyKey: kubernetes.io/hostname
# For further examples, see examples/yugabyte/affinity_overrides.yaml
affinity: {}
## Extra environment variables passed to the TServer pods.
@ -440,16 +328,13 @@ tserver:
# fieldPath: status.hostIP
extraEnv: []
## secretEnv variables are used to expose secrets data as env variables in the tserver pods.
## If the namespace field is not specified, we assume that the user has already
## created the secret in the same namespace as the DB pods.
## Example
# secretEnv variables are used to expose secrets data as env variables in the tserver pods.
# TODO Add namespace also to support copying secrets from other namespace.
# secretEnv:
# - name: MYSQL_LDAP_PASSWORD
# valueFrom:
# secretKeyRef:
# name: secretName
# namespace: my-other-namespace-with-ldap-secret
# key: password
secretEnv: []
@ -492,10 +377,6 @@ tserver:
# path: /home/yugabyte/nfs-backup
extraVolumeMounts: []
## Set service account for tserver DB pods. The service account
## should exist in the namespace where the tserver DB pods are brought up.
serviceAccount: ""
helm2Legacy: false
ip_version_support: "v4_only" # v4_only, v6_only are the only supported values at the moment

@ -3,20 +3,15 @@ annotations:
catalog.cattle.io/display-name: YugabyteDB Anywhere
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: yugaware
charts.openshift.io/name: yugaware
apiVersion: v2
appVersion: 2.18.7.0-b30
description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring
for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB cluster
with multiple pods provided by Kubernetes or OpenShift and logically grouped together
to form one logical distributed database.
apiVersion: v1
appVersion: 2.14.16.0-b17
description: YugaWare is YugaByte Database's Orchestration and Management console.
home: https://www.yugabyte.com
icon: https://avatars0.githubusercontent.com/u/17074854?s=200&v=4
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
name: Sanketh Indarapu
- email: gjalla@yugabyte.com
name: Govardhan Reddy Jalla
- email: ram@yugabyte.com
name: Ram Sri
- email: arnav@yugabyte.com
name: Arnav Agarwal
name: yugaware
version: 2.18.7
version: 2.14.16

@ -1,7 +1,5 @@
YugabyteDB Anywhere gives you the simplicity and support to deliver a private database-as-a-service (DBaaS) at scale. Use YugabyteDB Anywhere to deploy YugabyteDB across any cloud anywhere in the world with a few clicks, simplify day 2 operations through automation, and get the services needed to realize business outcomes with the database.
YugabyteDB Anywhere can be deployed using this Helm chart. Detailed documentation is available at:
- [Install YugabyteDB Anywhere software - Kubernetes](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes/)
- [Install YugabyteDB Anywhere software - OpenShift (Helm based)](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/openshift/#helm-based-installation)
YugabyteDB Anywhere can be deployed using this Helm chart. Detailed documentation is available at <https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes/>
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/yugabyte)](https://artifacthub.io/packages/search?repo=yugabyte)

@ -1,24 +0,0 @@
# OCP compatible values for yugaware
image:
repository: quay.io/yugabyte/yugaware-ubi
postgres:
registry: registry.redhat.io
tag: 1-88.1661531722
name: rhscl/postgresql-13-rhel7
prometheus:
registry: registry.redhat.io
tag: v4.11.0
name: openshift4/ose-prometheus
rbac:
create: false
ocpCompatibility:
enabled: true
securityContext:
enabled: false

@ -15,7 +15,7 @@ questions:
label: Yugabyte Platform image repository
description: "Yugabyte Platform image repository"
- variable: image.tag
default: "2.5.1.0-b153"
default: "2.14.1.0-b36"
required: false
type: string
label: Yugabyte Platform image tag

@ -1,14 +0,0 @@
{{/*
The usage of helm upgrade [RELEASE] [CHART] --reuse-values --set [variable]:[value] throws an
error in the event that new entries are inserted to the values chart.
This is because reuse-values flag uses the values from the last release. If --set (/--set-file/
--set-string/--values/-f) is applied with the reuse-values flag, the values from the last
release are overridden for those variables alone, and newer changes to the chart are
unacknowledged.
https://medium.com/@kcatstack/understand-helm-upgrade-flags-reset-values-reuse-values-6e58ac8f127e
To prevent errors while applying upgrade with --reuse-values and --set flags after introducing
new variables, default values can be specified in this file.
*/}}
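For illustration only, a minimal sketch of the behaviour described in the note above; the release name, repo alias, chart reference, and values file are placeholders, not part of the chart:

```
# Reuses the values stored with the previous release; only image.tag is
# overridden, and defaults for variables introduced in newer chart versions
# are not picked up unless a file such as this one supplies them.
helm upgrade my-release yugabytedb/yugaware --reuse-values --set image.tag=2.14.16.0-b17

# Alternative: discard the stored values and re-apply a complete values
# file so that newly introduced chart defaults take effect.
helm upgrade my-release yugabytedb/yugaware --reset-values -f my-values.yaml
```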

@ -169,57 +169,6 @@ server.pem: {{ $serverPemContent }}
{{- end -}}
{{- end -}}
{{/*
Check export of nss_wrapper environment variables required
*/}}
{{- define "checkNssWrapperExportRequired" -}}
{{- if .Values.securityContext.enabled -}}
{{- if and (ne (int .Values.securityContext.runAsUser) 0) (ne (int .Values.securityContext.runAsUser) 10001) -}}
{{- printf "true" -}}
{{- end -}}
{{- else -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
{{/*
Verify the extraVolumes and extraVolumeMounts mappings.
Every extraVolumes should have extraVolumeMounts
*/}}
{{- define "yugaware.isExtraVolumesMappingExists" -}}
{{- $lenExtraVolumes := len .extraVolumes -}}
{{- $lenExtraVolumeMounts := len .extraVolumeMounts -}}
{{- if and (eq $lenExtraVolumeMounts 0) (gt $lenExtraVolumes 0) -}}
{{- fail "You have not provided the extraVolumeMounts for extraVolumes." -}}
{{- else if and (eq $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
{{- fail "You have not provided the extraVolumes for extraVolumeMounts." -}}
{{- else if and (gt $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
{{- $volumeMountsList := list -}}
{{- range .extraVolumeMounts -}}
{{- $volumeMountsList = append $volumeMountsList .name -}}
{{- end -}}
{{- $volumesList := list -}}
{{- range .extraVolumes -}}
{{- $volumesList = append $volumesList .name -}}
{{- end -}}
{{- range $volumesList -}}
{{- if not (has . $volumeMountsList) -}}
{{- fail (printf "You have not provided the extraVolumeMounts for extraVolume %s" .) -}}
{{- end -}}
{{- end -}}
{{- range $volumeMountsList -}}
{{- if not (has . $volumesList) -}}
{{- fail (printf "You have not provided the extraVolumes for extraVolumeMounts %s" .) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Make list of custom http headers
*/}}
@ -234,4 +183,4 @@ Make list of custom http headers
{{- end -}}
{{- end -}}
]
{{- end -}}
{{- end -}}

@ -1,99 +0,0 @@
# Copyright (c) YugaByte, Inc.
{{- $root := . }}
{{- $tls := $root.Values.tls }}
{{- if and $tls.enabled $tls.certManager.enabled }}
{{- if $tls.certManager.genSelfsigned }}
{{- if $tls.certManager.useClusterIssuer }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: {{ $root.Release.Name }}-yugaware-cluster-issuer
spec:
selfSigned: {}
{{- else }} # useClusterIssuer=false
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ $root.Release.Name }}-yugaware-issuer
namespace: {{ $root.Release.Namespace }}
spec:
selfSigned: {}
---
{{- end }} # useClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ $root.Release.Name }}-yugaware-ui-root-ca
namespace: {{ $root.Release.Namespace }}
spec:
isCA: true
commonName: Yugaware self signed CA
secretName: {{ .Release.Name }}-yugaware-root-ca
secretTemplate:
labels:
app: "{{ template "yugaware.name" . }}"
chart: "{{ template "yugaware.chart" . }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
duration: {{ $tls.certManager.configuration.duration | quote }}
renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }}
privateKey:
algorithm: {{ $tls.certManager.configuration.algorithm | quote }}
encoding: PKCS8
size: {{ $tls.certManager.configuration.keySize }}
rotationPolicy: Always
issuerRef:
{{- if $tls.certManager.useClusterIssuer }}
name: {{ $root.Release.Name }}-yugaware-cluster-issuer
kind: ClusterIssuer
{{- else }}
name: {{ $root.Release.Name }}-yugaware-issuer
kind: Issuer
{{- end }}
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ $root.Release.Name }}-yugaware-ca-issuer
namespace: {{ $root.Release.Namespace }}
spec:
ca:
secretName: {{ .Release.Name }}-yugaware-root-ca
---
{{- end }} # genSelfsigned
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ $root.Release.Name }}-yugaware-ui-tls
namespace: {{ $root.Release.Namespace }}
spec:
isCA: false
commonName: {{ $tls.hostname }}
secretName: {{ .Release.Name }}-yugaware-tls-cert
secretTemplate:
labels:
app: "{{ template "yugaware.name" . }}"
chart: "{{ template "yugaware.chart" . }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
duration: {{ $tls.certManager.configuration.duration | quote }}
renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }}
privateKey:
algorithm: {{ $tls.certManager.configuration.algorithm | quote }}
encoding: PKCS8
size: {{ $tls.certManager.configuration.keySize }}
rotationPolicy: Always
issuerRef:
name: {{ $tls.certManager.genSelfsigned | ternary (printf "%s%s" $root.Release.Name "-yugaware-ca-issuer") ($tls.certManager.useClusterIssuer | ternary $tls.certManager.clusterIssuer $tls.certManager.issuer) }}
{{- if $tls.certManager.useClusterIssuer }}
kind: ClusterIssuer
{{- else }}
kind: Issuer
{{- end }}
---
{{- end }}

@ -31,31 +31,27 @@ data:
log.override.path = "/opt/yugabyte/yugaware/data/logs"
db {
default.dbname=${POSTGRES_DB}
{{ if .Values.postgres.external.host }}
default.host="{{ .Values.postgres.external.host }}"
default.port={{ .Values.postgres.external.port }}
default.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${POSTGRES_DB}${db.default.params}
{{ else if eq .Values.ip_version_support "v6_only" }}
default.host="[::1]"
default.host="::1"
default.url="jdbc:postgresql://[::1]:"${db.default.port}"/"${POSTGRES_DB}${db.default.params}
{{ else }}
default.host="127.0.0.1"
default.url="jdbc:postgresql://127.0.0.1:"${db.default.port}"/"${POSTGRES_DB}${db.default.params}
{{ end }}
default.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params}
default.params="{{ .Values.jdbcParams }}"
default.driver=org.postgresql.Driver
default.username=${POSTGRES_USER}
default.password=${POSTGRES_PASSWORD}
{{ if .Values.yugaware.cloud.enabled }}
perf_advisor.driver="org.hsqldb.jdbc.JDBCDriver"
perf_advisor.url="jdbc:hsqldb:mem:perf-advisor"
perf_advisor.createDatabaseIfMissing=false
perf_advisor.username="sa"
perf_advisor.password="sa"
perf_advisor.migration.auto=false
perf_advisor.migration.disabled=true
{{ else }}
perf_advisor.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.perf_advisor.dbname}${db.default.params}
perf_advisor.createDatabaseUrl="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params}
{{ end }}
default.logStatements=true
default.migration.initOnMigrate=true
default.migration.auto=true
}
ebean {
default = ["com.yugabyte.yw.models.*"]
}
{{- if .Values.tls.enabled }}
@ -144,7 +140,7 @@ data:
{{- range $key, $value := .Values.additionalAppConf.nonStringConf }}
{{ $key }} = {{ $value }}
{{- end }}
{{- if and .Values.tls.enabled (not .Values.tls.certManager.enabled) }}
{{- if .Values.tls.enabled }}
---
apiVersion: v1
kind: Secret
@ -159,8 +155,8 @@ type: Opaque
data:
{{- include "getOrCreateServerPem" (dict "Namespace" .Release.Namespace "Root" . "Name" (printf "%s%s" .Release.Name "-yugaware-tls-pem")) | nindent 2 }}
{{- end }}
---
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
apiVersion: v1
kind: ConfigMap
metadata:
@ -186,25 +182,6 @@ data:
docker-upgrade pg_upgrade | tee -a /pg_upgrade_logs/pg_upgrade_11_to_14.log;
echo "host all all all scram-sha-256" >> "${PGDATANEW}/pg_hba.conf";
fi
{{- end }}
{{- if .Values.securityContext.enabled }}
---
apiVersion: "v1"
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-yugaware-pg-prerun
labels:
app: {{ template "yugaware.name" . }}
chart: {{ template "yugaware.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
pg-prerun.sh: |
#!/bin/bash
set -x -o errexit
mkdir -p $PGDATA && chown -R $PG_UID:$PG_GID $PGDATA;
{{- end }}
{{- if .Values.prometheus.remoteWrite.tls.enabled }}
---
@ -275,11 +252,7 @@ data:
- 'container_cpu_usage_seconds_total{pod=~"(.*)yb-(.*)"}'
- 'container_memory_working_set_bytes{pod=~"(.*)yb-(.*)"}'
# kube-state-metrics
# Supports >= OCP v4.4
# OCP v4.4 has upgraded the KSM from 1.8.0 to 1.9.5.
# https://docs.openshift.com/container-platform/4.4/release_notes/ocp-4-4-release-notes.html#ocp-4-4-cluster-monitoring-version-updates
# - 'kube_pod_container_resource_requests_cpu_cores{pod=~"(.*)yb-(.*)"}'
- 'kube_pod_container_resource_requests{pod=~"(.*)yb-(.*)", unit="core"}'
- 'kube_pod_container_resource_requests_cpu_cores{pod=~"(.*)yb-(.*)"}'
static_configs:
- targets:
@ -299,15 +272,8 @@ data:
regex: "(.*)"
target_label: "container_name"
replacement: "$1"
# rename new name of the CPU metric to the old name and label
# ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
- source_labels: ["__name__", "unit"]
regex: "kube_pod_container_resource_requests;core"
target_label: "__name__"
replacement: "kube_pod_container_resource_requests_cpu_cores"
{{- else }}
{{- if .Values.prometheus.scrapeKubernetesNodes }}
- job_name: 'kubernetes-nodes'
@ -356,8 +322,8 @@ data:
- targets: ['kube-state-metrics.kube-system.svc.{{.Values.domainName}}:8080']
metric_relabel_configs:
# Only keep the metrics which we care about
- source_labels: ["__name__", "unit"]
regex: "kube_pod_container_resource_requests;core"
- source_labels: ["__name__"]
regex: "kube_pod_container_resource_requests_cpu_cores"
action: keep
# Save the name of the metric so we can group_by since we cannot by __name__ directly...
- source_labels: ["__name__"]
@ -376,16 +342,6 @@ data:
- source_labels: ["pod_name"]
regex: "(.*)yb-(.*)"
action: keep
# rename new name of the CPU metric to the old name and label
# ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
- source_labels: ["__name__", "unit"]
regex: "kube_pod_container_resource_requests;core"
target_label: "__name__"
replacement: "kube_pod_container_resource_requests_cpu_cores"
# Keep metrics for CPU, discard duplicate metrics
- source_labels: ["__name__"]
regex: "kube_pod_container_resource_requests_cpu_cores"
action: keep
- job_name: 'kubernetes-cadvisor'
@ -431,21 +387,6 @@ data:
action: keep
{{- end }}
{{- end }}
{{- if .Values.tls.enabled }}
- job_name: 'platform'
metrics_path: "/api/v1/prometheus_metrics"
scheme: https
tls_config:
insecure_skip_verify: true
static_configs:
- targets: [
'{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9443'
]
{{- else }}
- job_name: 'platform'
metrics_path: "/api/v1/prometheus_metrics"
@ -454,14 +395,6 @@ data:
'{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9000'
]
{{- end }}
- job_name: 'node-agent'
metrics_path: "/metrics"
file_sd_configs:
- files:
- '/opt/yugabyte/prometheus/targets/node-agent.*.json'
- job_name: "node"
file_sd_configs:
- files:
@ -547,8 +480,6 @@ data:
replacement: "$1"
- job_name: "yugabyte"
tls_config:
insecure_skip_verify: true
metrics_path: "/prometheus-metrics"
file_sd_configs:
- files:

@ -16,8 +16,8 @@ data:
postgres_user: {{ .Values.postgres.external.user | b64enc | quote }}
postgres_password: {{ .Values.postgres.external.pass | b64enc | quote }}
{{- else }}
postgres_db: {{ .Values.postgres.dbname | b64enc | quote }}
postgres_user: {{ .Values.postgres.user | b64enc | quote }}
postgres_db: {{ "yugaware" | b64enc | quote }}
postgres_user: {{ "postgres" | b64enc | quote }}
postgres_password: {{ include "getOrGeneratePasswordConfigMapToSecret" (dict "Namespace" .Release.Namespace "Name" (printf "%s%s" .Release.Name "-yugaware-global-config") "Key" "postgres_password") | quote }}
{{- end }}
app_secret: {{ randAlphaNum 64 | b64enc | b64enc | quote }}

@ -1,4 +1,3 @@
{{ if not .Values.yugaware.serviceAccount }}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -11,7 +10,6 @@ metadata:
annotations:
{{ toYaml .Values.yugaware.serviceAccountAnnotations | indent 4 }}
{{- end }}
{{ end }}
{{- if .Values.rbac.create }}
{{- if .Values.ocpCompatibility.enabled }}
---
@ -23,7 +21,7 @@ metadata:
app: yugaware
subjects:
- kind: ServiceAccount
name: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
@ -31,172 +29,43 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
{{- else }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}
labels:
k8s-app: yugaware
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
# Set of permissions required for operator
- apiGroups:
- operator.yugabyte.io
- apiGroups: [""]
resources:
- "*"
verbs:
- "get"
- "create"
- "delete"
- "patch"
- "list"
- "watch"
- "update"
# Set of permissions required to install, upgrade, delete the yugabyte chart
- nodes
- nodes/proxy
- services
- endpoints
- pods
- pods/exec
verbs: ["get", "list", "watch", "create"]
- apiGroups:
- "policy"
- extensions
resources:
- "poddisruptionbudgets"
verbs:
- "get"
- "create"
- "delete"
- "patch"
- apiGroups:
- ""
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
- apiGroups: [""]
resources:
- "services"
verbs:
- "get"
- "delete"
- "create"
- "patch"
- apiGroups:
- "apps"
- namespaces
- secrets
- pods/portforward
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["", "extensions"]
resources:
- "statefulsets"
verbs:
- "get"
- "list"
- "delete"
- "create"
- "patch"
- apiGroups:
- ""
resources:
- "secrets"
verbs:
- "create"
- "list"
- "get"
- "delete"
- "update"
- "patch"
- apiGroups:
- "cert-manager.io"
resources:
- "certificates"
verbs:
- "create"
- "delete"
- "get"
- "patch"
- apiGroups:
- ""
resources:
- "configmaps"
verbs:
- "get"
- "create"
- "patch"
- "delete"
# Set of permissions required by YBA to manage YB DB universes
- apiGroups:
- ""
resources:
- "namespaces"
verbs:
- "delete"
- "create"
- "patch"
- "get"
- "list"
- apiGroups:
- ""
resources:
- "pods"
verbs:
- "get"
- "list"
- "delete"
- apiGroups:
- ""
resources:
- "services"
verbs:
- "get"
- "list"
- apiGroups:
- ""
resources:
- "persistentvolumeclaims"
verbs:
- "get"
- "patch"
- "list"
- "delete"
- apiGroups:
- ""
resources:
- "pods/exec"
verbs:
- "create"
- apiGroups:
- "apps"
resources:
- "statefulsets/scale"
verbs:
- "patch"
- apiGroups:
- ""
resources:
- "events"
verbs:
- "list"
# required to scrape resource metrics like CPU, memory, etc.
- apiGroups:
- ""
resources:
- "nodes"
verbs:
- "list"
- "get"
- "watch"
# required to scrape resource metrics like CPU, memory, etc.
- apiGroups:
- ""
resources:
- "nodes/proxy"
verbs:
- "get"
# Ref: https://github.com/yugabyte/charts/commit/4a5319972385666487a7bc2cd0c35052f2cfa4c5
- apiGroups:
- ""
resources:
- "events"
verbs:
- "get"
- "list"
- "watch"
- "create"
- "update"
- "patch"
- "delete"
- apiGroups:
- ""
resources:
- "configmaps"
verbs:
- "list"
- "watch"
- "update"
- deployments
- services
verbs: ["create", "get", "list", "watch", "update", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@ -208,7 +77,7 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole

@ -40,10 +40,6 @@ spec:
{{- if and (eq .Values.yugaware.service.type "LoadBalancer") (.Values.yugaware.service.ip) }}
loadBalancerIP: "{{ .Values.yugaware.service.ip }}"
{{- end }}
{{- if .Values.yugaware.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- toYaml .Values.yugaware.service.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
{{- end }}
{{- if .Values.yugaware.serviceMonitor.enabled }}
---

@ -25,11 +25,8 @@ spec:
{{- end }}
labels:
app: {{ .Release.Name }}-yugaware
{{- if .Values.yugaware.pod.labels }}
{{ toYaml .Values.yugaware.pod.labels | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
serviceAccountName: {{ .Release.Name }}
imagePullSecrets:
- name: {{ .Values.image.pullSecret }}
{{- if .Values.securityContext.enabled }}
@ -39,30 +36,6 @@ spec:
fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }}
{{- end }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8}}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{- with .Values.tolerations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
{{- if .Values.zoneAffinity }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
{{ toYaml .Values.zoneAffinity | indent 18 }}
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
{{ toYaml .Values.zoneAffinity | indent 18 }}
{{- end }}
volumes:
- name: yugaware-storage
persistentVolumeClaim:
@ -111,36 +84,15 @@ spec:
secret:
secretName: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
{{- end }}
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
- name: pg-upgrade-11-to-14
configMap:
name: {{ .Release.Name }}-yugaware-pg-upgrade
items:
- key: pg-upgrade-11-to-14.sh
path: pg-upgrade-11-to-14.sh
{{- end }}
- name: pg-init
configMap:
name: {{ .Release.Name }}-yugaware-pg-prerun
items:
- key: pg-prerun.sh
path: pg-prerun.sh
{{- if .Values.postgres.extraVolumes -}}
{{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}}
{{- .Values.postgres.extraVolumes | toYaml | nindent 8 -}}
{{ end }}
{{- with .Values.dnsConfig }}
dnsConfig: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.dnsPolicy }}
dnsPolicy: {{ . | quote }}
{{- end }}
initContainers:
- image: {{ include "full_yugaware_image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.initContainers.prometheusConfiguration.resources }}
resources: {{- toYaml .Values.initContainers.prometheusConfiguration.resources | nindent 12 }}
{{ end -}}
name: prometheus-configuration
{{- if .Values.securityContext.enabled }}
command:
@ -168,13 +120,9 @@ spec:
- name: init-container-script
mountPath: /init-container
{{- end }}
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
- image: {{ include "full_image" (dict "containerName" "postgres-upgrade" "root" .) }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: postgres-upgrade
{{- if .Values.initContainers.postgresUpgrade.resources }}
resources: {{- toYaml .Values.initContainers.postgresUpgrade.resources | nindent 12 }}
{{ end -}}
command:
- 'bash'
- '-c'
@ -204,46 +152,12 @@ spec:
- name: yugaware-storage
mountPath: /pg_upgrade_logs
subPath: postgres_data_14
{{- end }}
{{- if .Values.securityContext.enabled }}
- image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }}
name: postgres-init
{{- if .Values.initContainers.postgresInit.resources }}
resources: {{- toYaml .Values.initContainers.postgresInit.resources | nindent 12 }}
{{ end -}}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/bin/bash", "/pg_prerun/pg-prerun.sh"]
env:
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
- name: PG_UID
value: {{ .Values.securityContext.runAsUser | quote }}
- name: PG_GID
value: {{ .Values.securityContext.runAsGroup | quote }}
volumeMounts:
- name: yugaware-storage
mountPath: /var/lib/postgresql/data
subPath: postgres_data_14
- name: pg-init
mountPath: /pg_prerun
{{- end }}
containers:
{{ if not .Values.postgres.external.host }}
- name: postgres
image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
- "run-postgresql"
{{- end }}
- "-c"
- "huge_pages=off"
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ required "runAsUser cannot be empty" .Values.securityContext.runAsUser }}
runAsGroup: {{ .Values.securityContext.runAsGroup | default 0 }}
runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
{{- end }}
args: ["-c", "huge_pages=off"]
env:
- name: POSTGRES_USER
valueFrom:
@ -260,37 +174,8 @@ spec:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_db
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
# Hardcoded the POSTGRESQL_USER because it's a mandatory env var in the RH PG image.
# It doesn't have access to create the DB, so YBA fails to create the perf_advisor DB.
# Need to use the admin user of the RH PG image (postgres).
# Changing the user name won't be possible moving forward for the OpenShift certified chart.
- name: POSTGRESQL_USER
value: pg-yba
# valueFrom:
# secretKeyRef:
# name: {{ .Release.Name }}-yugaware-global-config
# key: postgres_user
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_password
- name: POSTGRESQL_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_password
- name: POSTGRESQL_DATABASE
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_db
{{- else }}
# The RH Postgres image doesn't allow this directory to be changed.
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
{{- end }}
ports:
- containerPort: 5432
name: postgres
@ -302,17 +187,8 @@ spec:
volumeMounts:
- name: yugaware-storage
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
mountPath: /var/lib/pgsql/data
subPath: postgres_data_13
{{- else }}
mountPath: /var/lib/postgresql/data
subPath: postgres_data_14
{{- end }}
{{- if .Values.postgres.extraVolumeMounts -}}
{{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}}
{{- .Values.postgres.extraVolumeMounts | toYaml | nindent 12 -}}
{{- end -}}
{{ end }}
- name: prometheus
image: {{ include "full_image" (dict "containerName" "prometheus" "root" .) }}
@ -338,9 +214,6 @@ spec:
subPath: prometheus.yml
- name: yugaware-storage
mountPath: /prometheus/
- mountPath: /opt/yugabyte/yugaware/data/keys/
name: yugaware-storage
subPath: data/keys
{{- if .Values.prometheus.scrapeNodes }}
- name: yugaware-storage
mountPath: /opt/yugabyte/prometheus/targets
@ -362,9 +235,6 @@ spec:
- --web.enable-admin-api
- --web.enable-lifecycle
- --storage.tsdb.retention.time={{ .Values.prometheus.retentionTime }}
- --query.max-concurrency={{ .Values.prometheus.queryConcurrency }}
- --query.max-samples={{ .Values.prometheus.queryMaxSamples }}
- --query.timeout={{ .Values.prometheus.queryTimeout }}
ports:
- containerPort: 9090
- name: yugaware
@ -381,18 +251,12 @@ spec:
resources:
{{ toYaml .Values.yugaware.resources | indent 12 }}
{{- end }}
args: ["bin/yugaware","-Dconfig.file=/data/application.docker.conf"]
command: [ "/sbin/tini", "--"]
args:
- "bin/yugaware"
- "-Dconfig.file=/data/application.docker.conf"
env:
# Conditionally set these env variables, if runAsUser is not 0(root)
# or 10001(yugabyte).
{{- if eq (include "checkNssWrapperExportRequired" .) "true" }}
- name: NSS_WRAPPER_GROUP
value: "/tmp/group.template"
- name: NSS_WRAPPER_PASSWD
value: "/tmp/passwd.template"
- name: LD_PRELOAD
value: "/usr/lib64/libnss_wrapper.so"
{{- end }}
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
@ -413,7 +277,6 @@ spec:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: app_secret
{{- with .Values.yugaware.extraEnv }}{{ toYaml . | nindent 12 }}{{ end }}
ports:
- containerPort: 9000
name: yugaware
@ -430,9 +293,6 @@ spec:
- name: yugaware-storage
mountPath: /opt/yugabyte/releases/
subPath: releases
- name: yugaware-storage
mountPath: /opt/yugabyte/ybc/releases/
subPath: ybc_releases
# old path for backward compatibility
- name: yugaware-storage
mountPath: /opt/releases/

@ -1,37 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: {{ .Release.Name }}-yugaware-test
labels:
app: {{ .Release.Name }}-yugaware-test
chart: {{ template "yugaware.chart" . }}
release: {{ .Release.Name }}
annotations:
"helm.sh/hook": test
spec:
imagePullSecrets:
- name: {{ .Values.image.pullSecret }}
containers:
- name: yugaware-test
image: {{ include "full_yugaware_image" . }}
command:
- '/bin/bash'
- '-ec'
- >
sleep 60s;
{{- if .Values.tls.enabled }}
- >
curl --head -k https://{{ .Release.Name }}-yugaware-ui
{{- else }}
- >
curl --head http://{{ .Release.Name }}-yugaware-ui
{{- end }}
# Hardcoded resources for the test pod.
resources:
limits:
cpu: "1"
memory: "512Mi"
requests:
cpu: "0.5"
memory: "256Mi"
restartPolicy: Never

@ -1,40 +0,0 @@
suite: Resources verification
templates:
- statefulset.yaml
- configs.yaml
tests:
- it: YBA container
template: statefulset.yaml
asserts:
- isNotEmpty:
path: spec.template.spec.containers[?(@.name == "yugaware")].resources.requests
- it: Postgres container
template: statefulset.yaml
asserts:
- isNotEmpty:
path: spec.template.spec.containers[?(@.name == "postgres")].resources.requests
- it: Prometheus container
template: statefulset.yaml
asserts:
- isNotEmpty:
path: spec.template.spec.containers[?(@.name == "prometheus")].resources.requests
- it: Postgres-init initContainer
template: statefulset.yaml
asserts:
- isNotEmpty:
path: spec.template.spec.initContainers[?(@.name == "postgres-init")].resources.requests
- it: Prometheus-configuration initContainer
template: statefulset.yaml
asserts:
- isNotEmpty:
path: spec.template.spec.initContainers[?(@.name == "prometheus-configuration")].resources.requests
- it: Postgres-upgrade initContainer
template: statefulset.yaml
asserts:
- isNotEmpty:
path: spec.template.spec.initContainers[?(@.name == "postgres-upgrade")].resources.requests

@ -2,22 +2,20 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
fullnameOverride: ""
nameOverride: ""
image:
commonRegistry: ""
# Setting commonRegistry to say, quay.io overrides the registry settings for all images
# including the yugaware image
repository: quay.io/yugabyte/yugaware
tag: 2.18.7.0-b30
tag: 2.14.16.0-b17
pullPolicy: IfNotPresent
pullSecret: yugabyte-k8s-pull-secret
## Docker config JSON File name
## If set, this file content will be used to automatically create secret named as above
pullSecretFile: ""
# pullSecretFile:
postgres:
registry: ""
tag: '14.9'
@ -33,46 +31,36 @@ image:
tag: v2.47.1
name: prom/prometheus
yugaware:
replicas: 1
storage: 100Gi
storageClass: ""
storageAnnotations: {}
multiTenant: false
## Name of existing ServiceAccount. When provided, the chart won't create a ServiceAccount.
## It will attach the required RBAC roles to it.
## Helpful in Yugabyte Platform GKE App.
serviceAccount: ''
serviceAccount: yugaware
serviceMonitor:
enabled: false
annotations: {}
serviceAccountAnnotations: {}
service:
annotations: {}
clusterIP: ""
enabled: true
ip: ""
type: "LoadBalancer"
## whitelist source CIDRs
#loadBalancerSourceRanges:
#- 0.0.0.0/0
#- 192.168.100.0/24
pod:
annotations: {}
labels: {}
health:
username: ""
password: ""
email: ""
resources:
requests:
cpu: "2"
cpu: 2
memory: 4Gi
enableProxyMetricsAuth: true
## List of additional allowed CORS origins in case of complex rev-proxy
additionAllowedCorsOrigins: []
proxyEndpointTimeoutMs: 3 minute
proxyEndpointTimeoutMs: 1 minute
## Enables features specific for cloud deployments
cloud:
enabled: false
@ -83,10 +71,6 @@ yugaware:
# Note that the default of 0 doesn't really make sense since a StatefulSet isn't allowed to schedule extra replicas. However it is maintained as the default while we do additional testing. This value will likely change in the future.
maxUnavailable: 0
universe_boot_script: ""
extraEnv: []
# In case the client wants to add additional headers to YBA's HTTP responses.
# Previously this was possible via nginx; since nginx is no longer included,
# the same is exposed as application config/runtime config.
@ -95,10 +79,6 @@ yugaware:
## Configure PostgreSQL part of the application
postgres:
# DO NOT CHANGE if using OCP Certified helm chart
user: postgres
dbname: yugaware
service:
## Expose internal Postgres as a Service
enabled: false
@ -111,12 +91,12 @@ postgres:
resources:
requests:
cpu: "0.5"
cpu: 0.5
memory: 1Gi
# If external.host is set then we will connect to an external postgres database server instead of starting our own.
external:
host: ""
host: null
port: 5432
pass: ""
dbname: postgres
@ -125,65 +105,22 @@ postgres:
## JDBC connection parameters including the leading `?`.
jdbcParams: ""
## Extra volumes
## A matching extraVolumeMounts entry is required for each extraVolumes entry.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core
## Example:
# extraVolumes:
# - name: custom-nfs-vol
# persistentVolumeClaim:
# claimName: some-nfs-claim
extraVolumes: []
## Extra volume mounts
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core
## Example:
# extraVolumeMounts:
# - name: custom-nfs-vol
# mountPath: /home/yugabyte/nfs-backup
extraVolumeMounts: []
tls:
enabled: false
hostname: "localhost"
## Expects base64-encoded values for certificate and key.
certificate: ""
key: ""
certificate: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZDVENDQXZHZ0F3SUJBZ0lVTlhvN2N6T2dyUWQrU09wOWdNdE00b1Vva3hFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0piRzlqWVd4b2IzTjBNQjRYRFRJeE1EUXdOakExTXpnMU4xb1hEVE14TURRdwpOREExTXpnMU4xb3dGREVTTUJBR0ExVUVBd3dKYkc5allXeG9iM04wTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBZzhBTUlJQ0NnS0NBZ0VBMUxsSTFBLzRPOVIzSkNlN1N2MUxYVXhDSmxoTWpIWUoxV1FNVmcvai82RHkKazRTTmY0MkFLQjI0dFJFK2lEWTBNaTJrRWhJcVZ4TFdPN0hkWHVSN0tYNGxSZWFVVkRFTUtYUWNQUC9QWDZkbwpwZVZTUFpSVjVHNHNxTElXUFFkTVdIam9IQWx1aml5dGJsSVJUUWdLU3QrMmpuREFDN0dxRURMREdhNXRUWEM2CktRWkNtOERlaklOUTMzaGU2TDN0Q2hBRnhJM1pwY21sR0twbzdKVXJSUG14Mk9zTHFRcTB5dEVVK0lGZGppWHEKaHJLeFR0NUhHM3M3ZUNWaTRXdlZPelVGUitJbWRlQzBRZTBXeG5iZlZUMnJkVitQL1FaVXhWSEVtWnBPc0k2LwpmczhlK1dsMlduWXY1TTg5MWkxZER3Zi9lMDdiN20xQVRKdDRtTGRldzBtd1V4UGFGT2pDMDh6cU94NmF0cGhLClU1eHNWQmhGNVhyME9DeTQyMzN0MU5URXdWUEFDOFcwQmhHdldTRXBQTXNTKzM1b2lueEFrcFQzL01ibFpjNisKcXhSYUh6MHJhSksvVGIzelVKVWxWZFkxbGl5MVYyVjNxWEU2NWlsOUFHZ2pIaHhBNFBwSktCbzZ0WVRUT3pnTworL25mc0toMk95aE8zUWxBZ0JFUHlYUm5wL0xGSTVuQ2gzdjNiOXlabFNrSk05NkVoWEJ1bHhWUWN3L2p3N2NxCkRLSlBEeHFUQy9rWUs1V0FVZGhkWG1KQkRNMFBLcngzUGVOYjRsYnQzSTFIZW1QRDBoZktiWFd6alhiVTJQdWQKdjZmT0dXTDRLSFpaem9KZ1ljMFovRXRUMEpCR09GM09mMW42N2c5dDRlUnAzbEVSL09NM0FPY1dRbWFvOHlVQwpBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUI4R0ExVWRJd1FZCk1CYUFGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dJQkFBRmxrWVJkdzA0Zm9vT29BelUyaU5ORGV1aiszemhIeFQ5eU9iSkdwREZIRitoZQpuY1ZRWGZpMitHNjBWY0xuZERsWFhmbDZLOSs4ME55aEg4QjR1UEJNTWhoWG01MjJmYnJac1dFcnR3WE1rM2prClZ5UVA3MGk2NHE1ZGVrZzhoYzI0SXhFUlVsam9XM2lDTTdrb0VxaG15VkpGeDNxMVdobFEwdzNkWVpMQVNRclYKU0RpL2JGWjlqOXVtWVdoc0Y4QjFPSThPVjNlL0YyakU1UCtoTlJJazAzbW9zWE1Rdy9iZ3ZzV0hvSkZ5blB4UApHNGUzUjBob2NnbzI0Q2xOQ21YMWFBUms5c1pyN2h0NlVsM1F1d0dMdzZkK2I5emxrUW56TzFXQzc5ekVNU1R0ClRRRzFNT2ZlL2dTVkR3dThTSnpBOHV1Z0pYTktWWkxCZlpaNW41Tk9sOHdpOVVLa1BVUW4wOHo3VWNYVDR5ZnQKZHdrbnZnWDRvMFloUnNQNHpPWDF6eWxObzhqRDhRNlV1SkdQSksrN1JnUm8zVERPV3k4MEZpUzBxRmxrSFdMKwptT0pUWGxzaEpwdHE5b1c1eGx6N1lxTnFwZFVnRmNyTjJLQWNmaGVlNnV3SUFnOFJteTQvRlhRZjhKdXluSG5oClFhVlFnTEpEeHByZTZVNk5EdWg1Y1VsMUZTcWNCUGFPY0x0Q0ViVWg5ckQxajBIdkRnTUUvTTU2TGp1UGdGZlEKMS9xeXlDUkFjc2NCSnVMYjRxcXRUb25tZVZ3T1BBbzBsNXBjcC9JcjRTcTdwM0NML0kwT0o1SEhjcmY3d3JWSgpQVWgzdU1LbWVHVDRyeDdrWlQzQzBXenhUU0loc0lZOU12MVRtelF4MEprQm93c2NYaUYrcXkvUkl5UVgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
key: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFV1VWpVRC9nNzFIY2sKSjd0Sy9VdGRURUltV0V5TWRnblZaQXhXRCtQL29QS1RoSTEvallBb0hiaTFFVDZJTmpReUxhUVNFaXBYRXRZNwpzZDFlNUhzcGZpVkY1cFJVTVF3cGRCdzgvODlmcDJpbDVWSTlsRlhrYml5b3NoWTlCMHhZZU9nY0NXNk9MSzF1ClVoRk5DQXBLMzdhT2NNQUxzYW9RTXNNWnJtMU5jTG9wQmtLYndONk1nMURmZUY3b3ZlMEtFQVhFamRtbHlhVVkKcW1qc2xTdEUrYkhZNnd1cENyVEswUlQ0Z1YyT0plcUdzckZPM2tjYmV6dDRKV0xoYTlVN05RVkg0aVoxNExSQgo3UmJHZHQ5VlBhdDFYNC85QmxURlVjU1ptazZ3anI5K3p4NzVhWFphZGkva3p6M1dMVjBQQi85N1R0dnViVUJNCm0zaVl0MTdEU2JCVEU5b1U2TUxUek9vN0hwcTJtRXBUbkd4VUdFWGxldlE0TExqYmZlM1UxTVRCVThBTHhiUUcKRWE5WklTazh5eEw3Zm1pS2ZFQ1NsUGY4eHVWbHpyNnJGRm9mUFN0b2tyOU52Zk5RbFNWVjFqV1dMTFZYWlhlcApjVHJtS1gwQWFDTWVIRURnK2trb0dqcTFoTk03T0E3NytkK3dxSFk3S0U3ZENVQ0FFUS9KZEdlbjhzVWptY0tICmUvZHYzSm1WS1FrejNvU0ZjRzZYRlZCekQrUER0eW9Nb2s4UEdwTUwrUmdybFlCUjJGMWVZa0VNelE4cXZIYzkKNDF2aVZ1M2NqVWQ2WThQU0Y4cHRkYk9OZHRUWSs1Mi9wODRaWXZnb2Rsbk9nbUJoelJuOFMxUFFrRVk0WGM1LwpXZnJ1RDIzaDVHbmVVUkg4NHpjQTV4WkNacWp6SlFJREFRQUJBb0lDQUFmY2lScDlOSmxSY3MyOVFpaTFUN0cwCi9jVFpBb3MyV1lxdlZkMWdYUGEzaGY5NXFKa01LNjVQMnVHbUwzOXRNV1NoVnl6cnl2REkyMjM5VnNjSS9wdzcKOHppd0dzODV1TTlYWVN2SDhHd0NqZFdEc2hSZ2hRUWFKa0JkeElDZzRtdHFuSGxjeDk4dE80T1dPTmwxOEp0dgp4UmxpaFZacFRIV295cGtLWHpPN2RNWExXMjdTSStkaGV2Mm5QeXF1eWpIVEFjT1AwbmxVQ0d2dThFMjkvWWxoCkNQZVJTQzhKSEVGYWxNSFNWaGpJd2ZBVWJvVVJwZU1ZSE15RjVTK2JncGZiajhSbVVUR09DbHRkWGJnYjhJai8KN0hROEFlQkIrYVFKTDVEVnFRN1JWN1ppQlMwR2ZyODlHdXdEMUs4em9mcktPdURkdXpjR2hwZk9MeGpGdmhTOApSQ2Y1Z3BFMzg0aWlHc2tWZC9mZDJLK3NhSmk0L09HbHo0aHhhc1hDcTN1TXB5OTZPNFRrMXZzM3BXdWZNVmJXCnR2d1Mrcjhvbk9uOXZqa3lqOU11eUpId1BpSlNGMUt0ZzhPUU5WMlVST0xXcHlYMWk4Z2xoMXdSelRTQ2diQnMKZ3ZxWkFvaU1pWFh3SlVXN3Zpb0RLZjI0TnZvcjViaVNzeUh0MHVKUVZJaW1iK1prTFJwTWdwRlkyTlcrTnd6LwoxOW9DS2ZUVVpWNkJia09IK0NoOUowLy9hTTRGNnUvMTI4V0UxalJQU05mdWQ0b0dpdGVPNXRsRDNWSXRsb1hlCjNyWVMrcTNuYXU1RStWc2FRZGFVNzhrSnpXYmUrWURmQ1JwWGd6TkloSkMyQ1k5d0RSK3hIaVFwbzdLSHV6dngKUkpuRjhIcGwzdWhIdWxEam44dEpBb0lCQVFEeGxhVVIwN1l6TGF2OVZtamZCenpZMjcwOU9tWnhpa3NtRnlhWApKTkJMQVB3SGdXOEVCUHdKOEprSDhXR1NTekp1OXZGd1JDVEVqZ1J5dWUvS05DWnNmUWF2UDg3dzhablJHaEhjCklHUUV1MFN3bmJzZXFJK1VWa0M5amZjaFE4dlowM0dQTGZ6bWpsSW9PNkNLTVM3TlV2Ynk5MksvOHRVVWRtWWgKMmJJa2N4V0J1RDJoenh3K1ZId3ArWktMQ0FPZi9sOG8vQ20xQ1dZSFNGdVYzTkl3T016Z2FKaExJODJNR08zQwpuODZTMXcweGc2MHB5dUV6L0hXZS9JMFZkRGNsWlgyNC9jalVBb01kQlkvSGY4Tkh2ZUNhZExQeXI3eGpRY2NLClAzN0RhdFRyK2RTZ2RoVkxzUDRRRzVVZEZxNUlMSHoxTXBkb2xXZ2pDSlZqcTZMekFvSUJBUURoYXNYdVRzMDIKNEkvYkRlSGRZSmw2Q1NzVUh2NmJXL3dpYlRhd2dpbDh5RUNWS2x6eFY4eENwWnoxWVhRQlY1YnVvQlArbjZCWApnVHgzTTJHc2R5UU1xdGRCWG9qdGp1czB6ekFNQVQzOWNmdWlHMGR0YXF3eWJMVlEwYThDZnFmMDVyUmZ0ekVmCmtTUDk2d01kVUEyTGdCbnU4akwzOU41UkxtK2RpZUdxeDAwYmJTa3l5UE9HNHIvcDl6KzN6TmVmeUhmbm94bTkKUnQza1RpeGhVNkd4UGhOSnZpWEUrWUpwT0dKVXMvK2dUWWpjUE1zRW9ONHIyR215cUs3S21NZExFa3Y1SHliWgprbmNsV2FMVFlhNEpjMjJUaWZJd01NTWMwaCtBMkJVckdjZFZ6MTA0UXluUFZQZDdXcEszenhqcjRPUHh1YnQ2CjZvTWk2REdRSVNlSEFvSUJBUURTK1YyVHFQRDMxczNaU3VvQXc2Qld2ZWVRbmZ5eThSUFpxdVFQb0oycXNxeG0KblpsbXlEZVhNcDloK1dHOVVhQTBtY0dWeWx6VnJqU2lRRkR4cEFOZVFQMWlkSFh6b3ZveVN2TUg2dDJONkVELwpnRy9XUVZ4S0xkMFI3UFhCL2lQN0VaV2RkWXJqaWF5ajZCYTJPR2RuOWlrbFcvZklLM2Y4QzczN2w5TGoxQUVYCkxOL2QvREh0R1BqcDYwTVgyYUxZeVZzdlBxL3BvdENRVVpkeDA4dFhRM05nRXRmVTN1cDFpNXV2bU1IZEtLTWoKOTV0MDRQRTA1aWVOOVgzOEcyYkJhTldYaFVJcUxCdDJiOUgxWmxVU3hQWnR6TGNObkgwSHJYejJMU2MxMzRrYwpueXdhQ2FWbFdhYzJSL0E3Mi8vTmxkUjJpWDBDWDEvM0lGcmVGUmtUQW9JQkFBbGt0S2pRbWRhZWx3QU8zUW1uCm05MnRBaUdOaFJpZVJheDlscGpXWTdveWNoYUZOR2hPTzFIUHF2SEN4TjNGYzZHd0JBVkpTNW81NVhZbUt2elAKM2kyMDlORmhpaDAwSm5NRjZ6K2swWnQ5STNwRzNyd2RoT
jE1RURrMDg3RUw3QjNWZTFDOXhvdEZOaFcvdEZxRgpXbnNrdEcvem9kSVpYeVpNNUJQUmloamV3MFRRVUxZd0Q0M2daeFR0MjdiaUQxNDJNV0R5dUFEZU1pTHdhd01IClJDYXBxbzRaSVdQSzdmZEtoVFo0WmIrZFc0V3A5dC9UZ0U2ZGJ4SWwyMXJQOFFZYzFoT2tpNjduWHBXczNZOG4KYytRcTdqY0d1WlB1aEVMd01xWGcyMGozZ3duOVlTb1dDbWo4Wm0rNmY0Q3ZYWjkrdUtEN0YyZncyOVFaanU4dApvb01DZ2dFQkFPbVVHZ1VoT0tUVys1eEpkZlFKRUVXUncyVFF6Z2l6dSt3aVkzaDYrYXNTejRNY0srVGx6bWxVCmFHT013dFhTUzc0RXIxVmlCVXMrZnJKekFPR21IV0ExZWdtaGVlY1BvaE9ybTh5WkVueVJOSkRhWC9UUXBSUnEKaVdoWENBbjJTWFQxcFlsYVBzMjdkbXpFWnQ3UlVUSkJZZ1hHZXQ4dXFjUXZaVDJZK3N6cHFNV3UzaEpWdmIxdgpZNGRJWE12RG1aV1BPVjFwbHJEaTVoc214VW05TDVtWk1IblllNzFOYkhsaEIxK0VUNXZmWFZjOERzU1RRZWRRCitDRHJKNGQ0em85dFNCa2pwYTM5M2RDRjhCSURESUQyWkVJNCtBVW52NWhTNm82NitOLzBONlp3cXkwc2pKY0cKQ21LeS9tNUpqVzFJWDMxSmZ1UU5Ldm9YNkRFN0Zkaz0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo="
sslProtocols: "" # if set, overrides the default Nginx SSL protocols setting
## cert-manager values
## If cert-manager is enabled:
## If genSelfsigned: true:
## Create a self-signed issuer/clusterIssuer
## Generate a rootCA using the above issuer.
## Generate a tls certificate with secret name as: {{ .Release.Name }}-yugaware-tls-cert
## Else if genSelfsigned: false:
## Expect a clusterIssuer/issuer to be provided by user
## Generate a tls cert based on above issuer with secret name as: {{ .Release.Name }}-yugaware-tls-cert
certManager:
enabled: false
genSelfsigned: true
useClusterIssuer: false
clusterIssuer: cluster-ca
issuer: yugaware-ca
## Configuration for the TLS certificate requested from Issuer/ClusterIssuer
configuration:
duration: 8760h # 365d
renewBefore: 240h # 10d
algorithm: RSA # ECDSA or RSA
# Can be 2048, 4096 or 8192 for RSA
# Or 256, 384 or 521 for ECDSA
keySize: 2048
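Per the comments above, with `genSelfsigned: false` the chart expects an issuer or ClusterIssuer supplied by the user and only requests the `{{ .Release.Name }}-yugaware-tls-cert` secret from it. A hedged sketch of such an override (the issuer name `my-org-ca` is an assumption, not part of the chart):

```
# Hypothetical override: rely on a pre-existing cert-manager ClusterIssuer
# instead of the chart-generated self-signed CA.
certManager:
  enabled: true
  genSelfsigned: false
  useClusterIssuer: true
  clusterIssuer: my-org-ca   # assumed to exist in the cluster already
```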
## yugaware pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
enabled: true
enabled: false
## fsGroup related values are set at the pod level.
fsGroup: 10001
fsGroupChangePolicy: "OnRootMismatch"
## Expected to have runAsUser values != 0 when
## runAsNonRoot is set to true, otherwise container creation fails.
## The following values are set for yugaware and prometheus containers.
## Setting runAsUser other than 10001 will fail the VM universe deployment flow.
runAsUser: 10001
runAsGroup: 10001
runAsNonRoot: true
@ -213,66 +150,15 @@ ocpCompatibility:
# Extra containers to add to the pod.
sidecars: []
## The following two settings control pod placement: nodeSelector and AZ affinity.
## Note: Remember to also provide a yugaware.storageClass with a volumeBindingMode of
## WaitForFirstConsumer so that the PVC is created in a topology visible to this pod.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
## eg.
## nodeSelector:
## topology.kubernetes.io/region: us-west1
nodeSelector: {}
## Affinity to a particular zone for the pod.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## eg.
## nodeAffinity:
## requiredDuringSchedulingIgnoredDuringExecution:
## nodeSelectorTerms:
## - matchExpressions:
## - key: failure-domain.beta.kubernetes.io/zone
## operator: In
## values:
## - us-west1-a
## - us-west1-b
zoneAffinity: {}
## The tolerations that the pod should have.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
## @param dnsPolicy DNS Policy for pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsPolicy: ClusterFirst
dnsPolicy: ""
## @param dnsConfig DNS Configuration pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsConfig:
## options:
## - name: ndots
## value: "4"
dnsConfig: {}
## In some cases (for example, cloud deployments) Prometheus should not scrape nodes or evaluate alert rules.
prometheus:
## Setting this to false will disable scraping of TServer and Master
## nodes (could be pods or VMs)
scrapeNodes: true
evaluateAlertRules: true
retentionTime: 15d
queryConcurrency: 20
queryMaxSamples: 5000000
queryTimeout: 30s
## Set this to false to disable scraping of Kubernetes worker
## nodes. Setting this to false will result in blank graphs of
## resource utilization for Kubernetes universes. Useful for
## scenarios where only VM-based universes are being created.
scrapeKubernetesNodes: true
resources:
requests:
cpu: "2"
cpu: 2
memory: 4Gi
## Prometheus remote write config, as described here:
@ -293,10 +179,8 @@ prometheus:
# Arbitrary key=value config entries for application.docker.conf
additionalAppConf:
stringConf: {}
nonStringConf: {}
jdbcParams: ""
stringConf:
nonStringConf:
## Override the APIVersion used by policy group for
## PodDisruptionBudget resources. The chart selects the correct
@ -304,25 +188,3 @@ jdbcParams: ""
## to modify this unless you are using helm template command i.e. GKE
## app's deployer image against a Kubernetes cluster >= 1.21.
# pdbPolicyVersionOverride: "v1beta1"
pdbPolicyVersionOverride: ""
initContainers:
prometheusConfiguration:
resources:
## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container
## Use the above link to learn more about Kubernetes resources configuration.
requests:
cpu: "0.25"
memory: 500Mi
postgresUpgrade:
resources:
requests:
cpu: "0.5"
memory: 500Mi
postgresInit:
resources:
requests:
cpu: "0.25"
memory: 500Mi
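Tying back to the `postgres.external` comment earlier in this file (an external server is used instead of the bundled postgres container whenever `external.host` is set), a minimal override sketch with placeholder connection details:

```
# Hypothetical values override for an external PostgreSQL server; every
# value below except the key names is a placeholder.
postgres:
  external:
    host: pg.example.internal
    port: 5432
    pass: "change-me"
    dbname: postgres
```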

Some files were not shown because too many files have changed in this diff.