Added chart versions:

yugabyte/yugabyte:
  - 2.18.9
yugabyte/yugaware:
  - 2.18.9
pull/1059/head
github-actions[bot] 2024-08-16 00:49:14 +00:00
parent 454c762f75
commit 5982611810
48 changed files with 5634 additions and 1 deletions

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1 @@
tests

View File

@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: YugabyteDB
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: yugabyte
charts.openshift.io/name: yugabyte
apiVersion: v2
appVersion: 2.18.9.0-b17
description: YugabyteDB is the high-performance distributed SQL database for building
global, internet-scale apps.
home: https://www.yugabyte.com
icon: file://assets/icons/yugabyte.jpg
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
name: Sanketh Indarapu
- email: gjalla@yugabyte.com
name: Govardhan Reddy Jalla
name: yugabyte
sources:
- https://github.com/yugabyte/yugabyte-db
version: 2.18.9

View File

@ -0,0 +1 @@
YugabyteDB can be deployed in various Kubernetes configurations (including single zone, multi-zone and multi-cluster) using this Helm Chart. Detailed documentation is available in [YugabyteDB Docs for Kubernetes Deployments](https://docs.yugabyte.com/latest/deploy/kubernetes/).

View File

@ -0,0 +1 @@
This chart bootstraps an RF3 YugabyteDB version 2.18.9.0-b17 cluster using the Helm Package Manager.

View File

@ -0,0 +1,14 @@
serviceEndpoints:
- name: "yb-master-service"
type: LoadBalancer
app: "yb-master"
ports:
ui: "7000"
- name: "yb-tserver-service"
type: LoadBalancer
app: "yb-tserver"
ports:
yql-port: "9042"
yedis-port: "6379"
ysql-port: "5433"

View File

@ -0,0 +1,24 @@
serviceEndpoints:
- name: "yb-master-ui"
type: LoadBalancer
app: "yb-master"
ports:
ui: "7000"
- name: "yql-service"
type: LoadBalancer
app: "yb-tserver"
ports:
yql-port: "9042"
- name: "yedis-service"
type: LoadBalancer
app: "yb-tserver"
ports:
yedis-port: "6379"
- name: "ysql-service"
type: LoadBalancer
app: "yb-tserver"
ports:
ysql-port: "5433"

View File

@ -0,0 +1,219 @@
#!/usr/bin/python
# Copyright (c) YugaByte, Inc.
# This script generates a kubeconfig for the given service account
# by fetching the cluster information and also add the service account
# token for the authentication purpose.
import argparse
from subprocess import check_output
from sys import exit
import json
import base64
import tempfile
import time
import os.path
def run_command(command_args, namespace=None, as_json=True, log_command=True):
    """Invoke kubectl with the given arguments and return its output.

    Args:
        command_args: list of kubectl arguments (e.g. ["get", "sa", name]).
        namespace: optional namespace, passed via --namespace when set.
        as_json: when True, append "-o json" and parse stdout as JSON;
            otherwise return the raw stdout decoded as UTF-8.
        log_command: when True, print the full command line before running
            it (callers disable this when secrets appear in the arguments).

    Returns:
        Parsed JSON object when as_json is True, else the decoded string.
    """
    cmd = ["kubectl"]
    if namespace:
        cmd.extend(["--namespace", namespace])
    cmd.extend(command_args)
    if as_json:
        cmd.extend(["-o", "json"])
    if log_command:
        print("Running command: {}".format(" ".join(cmd)))
    raw_output = check_output(cmd)
    return json.loads(raw_output) if as_json else raw_output.decode("utf8")
def create_sa_token_secret(directory, sa_name, namespace):
    """Creates a service account token secret for sa_name in
    namespace. Returns the name of the secret created.
    Ref:
    https://k8s.io/docs/concepts/configuration/secret/#service-account-token-secrets
    """
    token_secret = {
        "apiVersion": "v1",
        "data": {
            # Base64 of "1": a placeholder payload marking this secret as
            # managed for Yugabyte Anywhere; Kubernetes fills in the
            # actual token and ca.crt after the secret is applied.
            "do-not-delete-used-for-yugabyte-anywhere": "MQ==",
        },
        "kind": "Secret",
        "metadata": {
            "annotations": {
                # Tells Kubernetes which service account the generated
                # token should be bound to.
                "kubernetes.io/service-account.name": sa_name,
            },
            "name": sa_name,
        },
        "type": "kubernetes.io/service-account-token",
    }
    # JSON is a subset of YAML, so kubectl accepts this .yaml file even
    # though it is written with json.dump.
    token_secret_file_name = os.path.join(directory, "token_secret.yaml")
    with open(token_secret_file_name, "w") as token_secret_file:
        json.dump(token_secret, token_secret_file)
    run_command(["apply", "-f", token_secret_file_name], namespace)
    return sa_name
def get_secret_data(secret, namespace):
    """Returns the secret in JSON format if it has ca.crt and token in
    it, else returns None. It retries up to 5 times with a 2 second
    sleep between attempts while waiting for the secret to be populated
    with this data.
    """
    secret_data = None
    num_retries = 5
    timeout = 2
    while True:
        secret_json = run_command(["get", "secret", secret], namespace)
        # Both the CA certificate and the token must be present before
        # this secret is usable for kubeconfig generation.
        if "ca.crt" in secret_json["data"] and "token" in secret_json["data"]:
            secret_data = secret_json
            break
        num_retries -= 1
        if num_retries == 0:
            break
        print(
            "Secret '{}' is not populated. Sleep {}s, ({} retries left)".format(
                secret, timeout, num_retries
            )
        )
        time.sleep(timeout)
    return secret_data
def get_secrets_for_sa(sa_name, namespace):
    """Returns a list of all service account token secrets associated
    with the given sa_name in the namespace.

    Queries kubectl with a field selector on the secret type and a
    jsonpath filter on the kubernetes.io/service-account.name
    annotation, then splits the quoted, space-separated output into a
    list (empty when no matching secret exists).
    """
    # Raw string: the dots in the annotation key must reach kubectl
    # escaped as "\.". A non-raw '\.' only works because Python keeps
    # unknown escapes verbatim, which is a SyntaxWarning on 3.12+.
    jsonpath = (
        r'jsonpath="{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name == "'
        + sa_name
        + '")].metadata.name}"'
    )
    # Bug fix: the namespace argument was previously ignored, so the
    # lookup ran against the context's default namespace.
    secrets = run_command(
        [
            "get",
            "secret",
            "--field-selector",
            "type=kubernetes.io/service-account-token",
            "-o",
            jsonpath,
        ],
        namespace=namespace,
        as_json=False,
    )
    return secrets.strip('"').split()
# ---- Command-line interface --------------------------------------------
parser = argparse.ArgumentParser(description="Generate KubeConfig with Token")
parser.add_argument("-s", "--service_account", help="Service Account name", required=True)
parser.add_argument("-n", "--namespace", help="Kubernetes namespace", default="kube-system")
parser.add_argument("-c", "--context", help="kubectl context")
parser.add_argument("-o", "--output_file", help="output file path")
args = vars(parser.parse_args())

# if the context is not provided we use the current-context
context = args["context"]
if context is None:
    context = run_command(["config", "current-context"], args["namespace"], as_json=False)

# Resolve the cluster name for the chosen context; column 3 of the
# no-headers "get-contexts" output is the cluster name.
cluster_attrs = run_command(
    ["config", "get-contexts", context.strip(), "--no-headers"], args["namespace"], as_json=False
)
cluster_name = cluster_attrs.strip().split()[2]

# Look up the API server endpoint for that cluster from kubectl config.
endpoint = run_command(
    [
        "config",
        "view",
        "-o",
        'jsonpath="{.clusters[?(@.name =="' + cluster_name + '")].cluster.server}"',
    ],
    args["namespace"],
    as_json=False,
)
service_account_info = run_command(["get", "sa", args["service_account"]], args["namespace"])

# Scratch directory for the CA cert and (if needed) the token secret
# manifest; cleaned up automatically when the object is garbage collected.
tmpdir = tempfile.TemporaryDirectory()

# Get the token and ca.crt from service account secret.
sa_secrets = list()
# Get secrets specified in the service account, there can be multiple
# of them, and not all are service account token secrets.
if "secrets" in service_account_info:
    sa_secrets = [secret["name"] for secret in service_account_info["secrets"]]
# Find the existing additional service account token secrets
sa_secrets.extend(get_secrets_for_sa(args["service_account"], args["namespace"]))

# Use the first candidate secret that actually contains ca.crt + token.
secret_data = None
for secret in sa_secrets:
    secret_data = get_secret_data(secret, args["namespace"])
    if secret_data is not None:
        break

# Kubernetes 1.22+ doesn't create the service account token secret by
# default, we have to create one.
if secret_data is None:
    print("No usable secret found for '{}', creating one.".format(args["service_account"]))
    token_secret = create_sa_token_secret(tmpdir.name, args["service_account"], args["namespace"])
    secret_data = get_secret_data(token_secret, args["namespace"])
    if secret_data is None:
        exit(
            "Failed to generate kubeconfig: No usable credentials found for '{}'.".format(
                args["service_account"]
            )
        )

context_name = "{}-{}".format(args["service_account"], cluster_name)
kube_config = args["output_file"]
if not kube_config:
    kube_config = "/tmp/{}.conf".format(args["service_account"])

# Write the CA certificate to a temp file so kubectl can embed it into
# the generated kubeconfig (--embed-certs below).
ca_crt_file_name = os.path.join(tmpdir.name, "ca.crt")
ca_crt_file = open(ca_crt_file_name, "wb")
ca_crt_file.write(base64.b64decode(secret_data["data"]["ca.crt"]))
ca_crt_file.close()

# create kubeconfig entry
set_cluster_cmd = [
    "config",
    "set-cluster",
    cluster_name,
    "--kubeconfig={}".format(kube_config),
    "--server={}".format(endpoint.strip('"')),
    "--embed-certs=true",
    "--certificate-authority={}".format(ca_crt_file_name),
]
run_command(set_cluster_cmd, as_json=False)

user_token = base64.b64decode(secret_data["data"]["token"]).decode("utf-8")
# The token is sensitive, so logging is disabled for this invocation.
set_credentials_cmd = [
    "config",
    "set-credentials",
    context_name,
    "--token={}".format(user_token),
    "--kubeconfig={}".format(kube_config),
]
run_command(set_credentials_cmd, as_json=False, log_command=False)

set_context_cmd = [
    "config",
    "set-context",
    context_name,
    "--cluster={}".format(cluster_name),
    "--user={}".format(context_name),
    "--kubeconfig={}".format(kube_config),
]
run_command(set_context_cmd, as_json=False)

use_context_cmd = ["config", "use-context", context_name, "--kubeconfig={}".format(kube_config)]
run_command(use_context_cmd, as_json=False)

print("Generated the kubeconfig file: {}".format(kube_config))

View File

@ -0,0 +1,4 @@
# OCP compatible values for yugabyte
Image:
repository: "quay.io/yugabyte/yugabyte-ubi"

View File

@ -0,0 +1,174 @@
---
questions:
## Default images for yugabyte
- variable: questions.defaultYugabyteDB
default: true
description: "Use default settings for YugabyteDB"
label: Use default
type: boolean
show_subquestion_if: false
group: "YugabyteDB"
subquestions:
- variable: Image.repository
default: "yugabytedb/yugabyte"
required: true
type: string
label: YugabyteDB image repository
description: "YugabyteDB image repository"
- variable: Image.tag
# Default kept in sync with the chart's appVersion (2.18.9.0-b17);
# the previous default "2.5.1.0-b153" was stale.
default: "2.18.9.0-b17"
required: true
type: string
label: YugabyteDB image tag
description: "YugabyteDB image tag"
- variable: Image.pullPolicy
default: "IfNotPresent"
required: false
type: enum
options:
- "Always"
- "IfNotPresent"
label: YugabyteDB image pull policy
description: "YugabyteDB image pull policy"
- variable: storage.ephemeral
default: false
required: false
type: boolean
label: YugabyteDB ephemeral storage
description: "Won't allocate PVs when true"
- variable: replicas.master
default: 3
description: "Number of replicas for Master"
type: int
required: true
label: Replication Factor Master
- variable: replicas.tserver
default: 3
description: "Number of replicas for TServer"
type: int
required: true
label: Replication Factor TServer
- variable: statefulSetAnnotations
description: Annotations for the StatefulSet
type: dict
required: false
label: "Annotations for the StatefulSet"
- variable: questions.defaultMasterStorage
default: true
description: "Use default storage configurations for YugabyteDB Master"
label: Use default storage configurations
type: boolean
show_subquestion_if: false
group: "Master Storage"
subquestions:
- variable: storage.master.count
default: 2
required: true
type: int
label: YugabyteDB master storage disk count
description: "YugabyteDB master storage disk count"
- variable: storage.master.size
default: "10Gi"
required: true
type: string
label: YugabyteDB master storage size
description: "YugabyteDB master storage size"
- variable: storage.master.storageClass
default: ""
required: false
type: storageclass
label: YugabyteDB master storage class
description: "YugabyteDB master storage class"
- variable: questions.defaultTServerStorage
default: true
description: "Use default storage configurations for YugabyteDB TServer"
label: Use default storage configuration
type: boolean
show_subquestion_if: false
group: "TServer Storage"
subquestions:
- variable: storage.tserver.count
default: 2
required: true
type: int
label: YugabyteDB TServer storage disk count
description: "YugabyteDB TServer storage disk count"
- variable: storage.tserver.size
default: "10Gi"
required: true
type: string
label: YugabyteDB TServer storage size
description: "YugabyteDB TServer storage size"
- variable: storage.tserver.storageClass
default: ""
required: false
type: storageclass
label: YugabyteDB TServer storage class
description: "YugabyteDB TServer storage class"
## Default resources
- variable: questions.defaultResources
default: true
description: "Use default resources for YugabyteDB"
label: Use default resources
type: boolean
show_subquestion_if: false
group: "Resources"
subquestions:
- variable: resource.master.requests.cpu
default: "2"
description: "Master vcpu allocation for YugabyteDB"
type: string
required: true
label: vcpu allocation for master
- variable: resource.master.requests.memory
default: "2Gi"
description: "Master RAM allocation for YugabyteDB"
type: string
required: true
label: RAM allocation for master
- variable: resource.tserver.requests.cpu
default: "2"
description: "TServer vcpu allocation for YugabyteDB"
type: string
required: true
label: vcpu allocation for tserver
- variable: resource.tserver.requests.memory
default: "4Gi"
description: "TServer RAM allocation for YugabyteDB"
type: string
required: true
label: RAM allocation for tserver
## TLS
- variable: tls.enabled
default: false
description: "Enable TLS - TLS disabled by default"
label: Enable TLS
type: boolean
show_subquestion_if: true
group: "TLS"
subquestions:
- variable: tls.nodeToNode
default: true
description: "Node to Node"
type: boolean
required: false
label: Node to Node
- variable: tls.clientToServer
default: true
description: "Client to server"
type: boolean
required: false
label: Client to server
- variable: tls.insecure
default: false
description: "Insecure - no service will connect on unencrypted connection"
type: boolean
required: false
label: Insecure communication
- variable: tls.certManager.enabled
default: false
description: "Use cert-manager to provide cluster certificates"
type: boolean
required: false
label: Cert-Manager Support

View File

@ -0,0 +1,25 @@
1. Get YugabyteDB Pods by running this command:
kubectl --namespace {{ .Release.Namespace }} get pods
2. Get list of YugabyteDB services that are running:
kubectl --namespace {{ .Release.Namespace }} get services
3. Get information about the load balancer services:
kubectl get svc --namespace {{ .Release.Namespace }}
4. Connect to one of the tablet servers:
kubectl exec --namespace {{ .Release.Namespace }} -it yb-tserver-0 bash
5. Run YSQL shell from inside of a tablet server:
kubectl exec --namespace {{ .Release.Namespace }} -it yb-tserver-0 -- /home/yugabyte/bin/ysqlsh -h yb-tserver-0.yb-tservers.{{ .Release.Namespace }}
6. Cleanup YugabyteDB Pods
For helm 2:
helm delete {{ .Release.Name }} --purge
For helm 3:
helm delete {{ .Release.Name }} -n {{ .Release.Namespace }}
NOTE: You need to manually delete the persistent volume
{{- $root := . -}}
{{- range .Values.Services }}
kubectl delete pvc --namespace {{ $root.Release.Namespace }} -l app={{.label}}
{{- end }}

View File

@ -0,0 +1,415 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
The components in this chart create additional resources that expand the longest created name strings.
The longest name that gets created is 20 characters long, so truncation should be at 63-20=43.
*/}}
{{- define "yugabyte.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 43 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 43 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 43 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generate common labels.
*/}}
{{- define "yugabyte.labels" }}
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
release: {{ .Release.Name | quote }}
chart: {{ .Chart.Name | quote }}
component: {{ .Values.Component | quote }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end }}
{{/*
Generate app label.
*/}}
{{- define "yugabyte.applabel" }}
{{- if .root.Values.oldNamingStyle }}
app: "{{ .label }}"
{{- else }}
app.kubernetes.io/name: "{{ .label }}"
{{- end }}
{{- end }}
{{/*
Generate app selector.
*/}}
{{- define "yugabyte.appselector" }}
{{- if .root.Values.oldNamingStyle }}
app: "{{ .label }}"
{{- else }}
app.kubernetes.io/name: "{{ .label }}"
release: {{ .root.Release.Name | quote }}
{{- end }}
{{- end }}
{{/*
Create secrets in DBNamespace from other namespaces by iterating over envSecrets.
*/}}
{{- define "yugabyte.envsecrets" -}}
{{- range $v := .secretenv }}
{{- if $v.valueFrom.secretKeyRef.namespace }}
{{- $secretObj := (lookup
"v1"
"Secret"
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name)
| default dict }}
{{- $secretData := (get $secretObj "data") | default dict }}
{{- $secretValue := (get $secretData $v.valueFrom.secretKeyRef.key) | default "" }}
{{- if (and (not $secretValue) (not $v.valueFrom.secretKeyRef.optional)) }}
{{- required (printf "Secret or key missing for %s/%s in namespace: %s"
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
$v.valueFrom.secretKeyRef.namespace)
nil }}
{{- end }}
{{- if $secretValue }}
apiVersion: v1
kind: Secret
metadata:
{{- $secretfullname := printf "%s-%s-%s-%s"
$.root.Release.Name
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
}}
name: {{ printf "%s-%s-%s-%s-%s-%s"
$.root.Release.Name
($v.valueFrom.secretKeyRef.namespace | substr 0 5)
($v.valueFrom.secretKeyRef.name | substr 0 5)
( $v.valueFrom.secretKeyRef.key | substr 0 5)
(sha256sum $secretfullname | substr 0 4)
($.suffix)
| lower | replace "." "" | replace "_" ""
}}
namespace: "{{ $.root.Release.Namespace }}"
labels:
{{- include "yugabyte.labels" $.root | indent 4 }}
type: Opaque # should it be an Opaque secret?
data:
{{ $v.valueFrom.secretKeyRef.key }}: {{ $secretValue | quote }}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}
{{/*
Add env secrets to DB statefulset.
*/}}
{{- define "yugabyte.addenvsecrets" -}}
{{- range $v := .secretenv }}
- name: {{ $v.name }}
valueFrom:
secretKeyRef:
{{- if $v.valueFrom.secretKeyRef.namespace }}
{{- $secretfullname := printf "%s-%s-%s-%s"
$.root.Release.Name
$v.valueFrom.secretKeyRef.namespace
$v.valueFrom.secretKeyRef.name
$v.valueFrom.secretKeyRef.key
}}
name: {{ printf "%s-%s-%s-%s-%s-%s"
$.root.Release.Name
($v.valueFrom.secretKeyRef.namespace | substr 0 5)
($v.valueFrom.secretKeyRef.name | substr 0 5)
($v.valueFrom.secretKeyRef.key | substr 0 5)
(sha256sum $secretfullname | substr 0 4)
($.suffix)
| lower | replace "." "" | replace "_" ""
}}
{{- else }}
name: {{ $v.valueFrom.secretKeyRef.name }}
{{- end }}
key: {{ $v.valueFrom.secretKeyRef.key }}
optional: {{ $v.valueFrom.secretKeyRef.optional | default "false" }}
{{- end }}
{{- end }}
{{/*
Create Volume name.
*/}}
{{- define "yugabyte.volume_name" -}}
{{- printf "%s-datadir" (include "yugabyte.fullname" .) -}}
{{- end -}}
{{/*
Derive the memory hard limit for each POD based on the memory limit.
Since the memory is represented in <x>GBi, we use this function to convert that into bytes.
Multiplied by 870 since 0.85 * 1024 ~ 870 (floating calculations not supported).
*/}}
{{- define "yugabyte.memory_hard_limit" -}}
{{- printf "%d" .limits.memory | regexFind "\\d+" | mul 1024 | mul 1024 | mul 870 -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "yugabyte.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Generate a preflight check script invocation.
*/}}
{{- define "yugabyte.preflight_check" -}}
{{- if not .Values.preflight.skipAll -}}
{{- $port := .Preflight.Port -}}
{{- range $addr := split "," .Preflight.Addr -}}
if [ -f /home/yugabyte/tools/k8s_preflight.py ]; then
PYTHONUNBUFFERED="true" /home/yugabyte/tools/k8s_preflight.py \
dnscheck \
--addr="{{ $addr }}" \
{{- if not $.Values.preflight.skipBind }}
--port="{{ $port }}"
{{- else }}
--skip_bind
{{- end }}
fi && \
{{ end }}
{{- end }}
{{- end }}
{{/*
Get YugaByte fs data directories.
*/}}
{{- define "yugabyte.fs_data_dirs" -}}
{{- range $index := until (int (.count)) -}}
{{- if ne $index 0 }},{{ end }}/mnt/disk{{ $index -}}
{{- end -}}
{{- end -}}
{{/*
Get files from fs data directories for readiness / liveness probes.
*/}}
{{- define "yugabyte.fs_data_dirs_probe_files" -}}
{{- range $index := until (int (.count)) -}}
{{- if ne $index 0 }} {{ end }}"/mnt/disk{{ $index -}}/disk.check"
{{- end -}}
{{- end -}}
{{/*
Command to do a disk write and sync for liveness probes.
*/}}
{{- define "yugabyte.fs_data_dirs_probe" -}}
echo "disk check at: $(date)" \
| tee {{ template "yugabyte.fs_data_dirs_probe_files" . }} \
&& sync {{ template "yugabyte.fs_data_dirs_probe_files" . }}
{{- end -}}
{{/*
Generate server FQDN.
*/}}
{{- define "yugabyte.server_fqdn" -}}
{{- if .Values.multicluster.createServicePerPod -}}
{{- printf "$(HOSTNAME).$(NAMESPACE).svc.%s" .Values.domainName -}}
{{- else if (and .Values.oldNamingStyle .Values.multicluster.createServiceExports) -}}
{{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }}
{{- printf "$(HOSTNAME).%s.%s.$(NAMESPACE).svc.clusterset.local" $membershipName .Service.name -}}
{{- else if .Values.oldNamingStyle -}}
{{- printf "$(HOSTNAME).%s.$(NAMESPACE).svc.%s" .Service.name .Values.domainName -}}
{{- else -}}
{{- if .Values.multicluster.createServiceExports -}}
{{ $membershipName := required "A valid membership name is required! Please set multicluster.kubernetesClusterId" .Values.multicluster.kubernetesClusterId }}
{{- printf "$(HOSTNAME).%s.%s-%s.$(NAMESPACE).svc.clusterset.local" $membershipName (include "yugabyte.fullname" .) .Service.name -}}
{{- else -}}
{{- printf "$(HOSTNAME).%s-%s.$(NAMESPACE).svc.%s" (include "yugabyte.fullname" .) .Service.name .Values.domainName -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generate server broadcast address.
*/}}
{{- define "yugabyte.server_broadcast_address" -}}
{{- include "yugabyte.server_fqdn" . }}:{{ index .Service.ports "tcp-rpc-port" -}}
{{- end -}}
{{/*
Generate server RPC bind address.
In case of multi-cluster services (MCS), we set it to $(POD_IP) to
ensure YCQL uses a resolvable address.
See https://github.com/yugabyte/yugabyte-db/issues/16155
We use a workaround for above in case of Istio by setting it to
$(POD_IP) and localhost. Master doesn't support that combination, so
we stick to 0.0.0.0, which works for master.
*/}}
{{- define "yugabyte.rpc_bind_address" -}}
{{- $port := index .Service.ports "tcp-rpc-port" -}}
{{- if .Values.istioCompatibility.enabled -}}
{{- if (eq .Service.name "yb-masters") -}}
0.0.0.0:{{ $port }}
{{- else -}}
$(POD_IP):{{ $port }},127.0.0.1:{{ $port }}
{{- end -}}
{{- else if (or .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod) -}}
$(POD_IP):{{ $port }}
{{- else -}}
{{- include "yugabyte.server_fqdn" . -}}
{{- end -}}
{{- end -}}
{{/*
Generate server web interface.
*/}}
{{- define "yugabyte.webserver_interface" -}}
{{- eq .Values.ip_version_support "v6_only" | ternary "[::]" "0.0.0.0" -}}
{{- end -}}
{{/*
Generate server CQL proxy bind address.
*/}}
{{- define "yugabyte.cql_proxy_bind_address" -}}
{{- if or .Values.istioCompatibility.enabled .Values.multicluster.createServiceExports .Values.multicluster.createServicePerPod -}}
0.0.0.0:{{ index .Service.ports "tcp-yql-port" -}}
{{- else -}}
{{- include "yugabyte.server_fqdn" . -}}
{{- end -}}
{{- end -}}
{{/*
Generate server PGSQL proxy bind address.
*/}}
{{- define "yugabyte.pgsql_proxy_bind_address" -}}
{{- eq .Values.ip_version_support "v6_only" | ternary "[::]" "0.0.0.0" -}}:{{ index .Service.ports "tcp-ysql-port" -}}
{{- end -}}
{{/*
Get YugaByte master addresses
*/}}
{{- define "yugabyte.master_addresses" -}}
{{- $master_replicas := .Values.replicas.master | int -}}
{{- $domain_name := .Values.domainName -}}
{{- $newNamingStylePrefix := printf "%s-" (include "yugabyte.fullname" .) -}}
{{- $prefix := ternary "" $newNamingStylePrefix $.Values.oldNamingStyle -}}
{{- range .Values.Services -}}
{{- if eq .name "yb-masters" -}}
{{- range $index := until $master_replicas -}}
{{- if ne $index 0 }},{{ end -}}
{{- $prefix }}yb-master-{{ $index }}.{{ $prefix }}yb-masters.$(NAMESPACE).svc.{{ $domain_name }}:7100
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Compute the maximum number of unavailable pods based on the number of master replicas
*/}}
{{- define "yugabyte.max_unavailable_for_quorum" -}}
{{- $master_replicas_100x := .Values.replicas.master | int | mul 100 -}}
{{- $max_unavailable_master_replicas := 100 | div (100 | sub (2 | div ($master_replicas_100x | add 100))) -}}
{{- printf "%d" $max_unavailable_master_replicas -}}
{{- end -}}
{{/*
Set consistent issuer name.
*/}}
{{- define "yugabyte.tls_cm_issuer" -}}
{{- if .Values.tls.certManager.bootstrapSelfsigned -}}
{{ .Values.oldNamingStyle | ternary "yugabyte-selfsigned" (printf "%s-selfsigned" (include "yugabyte.fullname" .)) }}
{{- else -}}
{{ .Values.tls.certManager.useClusterIssuer | ternary .Values.tls.certManager.clusterIssuer .Values.tls.certManager.issuer}}
{{- end -}}
{{- end -}}
{{/*
Verify the extraVolumes and extraVolumeMounts mappings.
Every extraVolumes should have extraVolumeMounts
*/}}
{{- define "yugabyte.isExtraVolumesMappingExists" -}}
{{- $lenExtraVolumes := len .extraVolumes -}}
{{- $lenExtraVolumeMounts := len .extraVolumeMounts -}}
{{- if and (eq $lenExtraVolumeMounts 0) (gt $lenExtraVolumes 0) -}}
{{- fail "You have not provided the extraVolumeMounts for extraVolumes." -}}
{{- else if and (eq $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
{{- fail "You have not provided the extraVolumes for extraVolumeMounts." -}}
{{- else if and (gt $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
{{- $volumeMountsList := list -}}
{{- range .extraVolumeMounts -}}
{{- $volumeMountsList = append $volumeMountsList .name -}}
{{- end -}}
{{- $volumesList := list -}}
{{- range .extraVolumes -}}
{{- $volumesList = append $volumesList .name -}}
{{- end -}}
{{- range $volumesList -}}
{{- if not (has . $volumeMountsList) -}}
{{- fail (printf "You have not provided the extraVolumeMounts for extraVolume %s" .) -}}
{{- end -}}
{{- end -}}
{{- range $volumeMountsList -}}
{{- if not (has . $volumesList) -}}
{{- fail (printf "You have not provided the extraVolumes for extraVolumeMounts %s" .) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Default nodeAffinity for multi-az deployments
*/}}
{{- define "yugabyte.multiAZNodeAffinity" -}}
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
- {{ quote .Values.AZ }}
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- {{ quote .Values.AZ }}
{{- end -}}
{{/*
Default podAntiAffinity for master and tserver
This requires "appLabelArgs" to be passed in - defined in service.yaml
we have a .root and a .label in appLabelArgs
*/}}
{{- define "yugabyte.podAntiAffinity" -}}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
{{- if .root.Values.oldNamingStyle }}
- key: app
operator: In
values:
- "{{ .label }}"
{{- else }}
- key: app.kubernetes.io/name
operator: In
values:
- "{{ .label }}"
- key: release
operator: In
values:
- {{ .root.Release.Name | quote }}
{{- end }}
topologyKey: kubernetes.io/hostname
{{- end -}}

View File

@ -0,0 +1,150 @@
{{- $root := . -}}
---
{{- if $root.Values.tls.certManager.enabled }}
{{- if $root.Values.tls.certManager.bootstrapSelfsigned }}
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ (printf "%s-bootstrap" (include "yugabyte.tls_cm_issuer" $root)) | quote }}
namespace: "{{ $root.Release.Namespace }}"
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }}
namespace: "{{ $root.Release.Namespace }}"
spec:
isCA: true
privateKey:
algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }}
encoding: PKCS8
size: {{ $root.Values.tls.certManager.certificates.keySize }}
commonName: Yugabyte Selfsigned CA
secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }}
issuerRef:
name: {{ (printf "%s-bootstrap" (include "yugabyte.tls_cm_issuer" $root)) | quote }}
kind: Issuer
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "yugabyte.tls_cm_issuer" $root | quote }}
namespace: "{{ $root.Release.Namespace }}"
spec:
ca:
secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-ca" (printf "%s-ca" (include "yugabyte.fullname" $root)) }}
---
{{- else }}
{{/* when bootstrapSelfsigned = false, ie. when using an external CA.
Create a Secret with just the rootCA.cert value and mount into master/tserver pods.
This will be used as a fall back in case the Secret generated by cert-manager does not
have a root ca.crt. This can happen for certain certificate issuers like LetsEncrypt.
*/}}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }}
namespace: "{{ $root.Release.Namespace }}"
labels:
{{- include "yugabyte.labels" $root | indent 4 }}
type: Opaque
data:
ca.crt: {{ $root.Values.tls.rootCA.cert }}
---
{{- end }}
{{/*
The below Certificate resource will trigger cert-manager to issue crt/key into Secrets.
These secrets are mounted into master/tserver pods.
*/}}
{{- range .Values.Services }}
{{- $service := . -}}
{{- $appLabelArgs := dict "label" .label "root" $root -}}
{{- $serviceValues := (dict "Service" $service "Values" $root.Values "Chart" $root.Chart "Release" $root.Release) -}}
{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}}
{{- if (gt (int $replicas) 0) }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }}
namespace: "{{ $root.Release.Namespace }}"
spec:
secretTemplate:
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 6 }}
{{- include "yugabyte.labels" $root | indent 6 }}
issuerRef:
name: {{ include "yugabyte.tls_cm_issuer" $root | quote }}
{{- if $root.Values.tls.certManager.useClusterIssuer }}
kind: ClusterIssuer
{{- else }}
kind: Issuer
{{- end }}
secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }}
duration: {{ $root.Values.tls.certManager.certificates.duration | quote }}
renewBefore: {{ $root.Values.tls.certManager.certificates.renewBefore | quote }}
isCA: false
privateKey:
algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }}
encoding: PKCS8
size: {{ $root.Values.tls.certManager.certificates.keySize }}
rotationPolicy: Always
usages:
- server auth
- client auth
# At least one of a DNS Name, URI, or IP address is required.
dnsNames:
{{- range $index := until ( int ( $replicas ) ) }}
{{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }}
- {{$node}}
{{- end }}
- {{ printf "%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
uris: []
ipAddresses: []
---
{{- end }}
{{- end }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
namespace: "{{ $root.Release.Namespace }}"
spec:
secretTemplate:
labels:
{{- include "yugabyte.labels" $root | indent 6 }}
issuerRef:
name: {{ include "yugabyte.tls_cm_issuer" $root | quote }}
{{- if $root.Values.tls.certManager.useClusterIssuer }}
kind: ClusterIssuer
{{- else }}
kind: Issuer
{{- end }}
secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
duration: {{ $root.Values.tls.certManager.certificates.duration | quote }}
renewBefore: {{ $root.Values.tls.certManager.certificates.renewBefore | quote }}
commonName: yugabyte
isCA: false
privateKey:
algorithm: {{ $root.Values.tls.certManager.certificates.algorithm | quote }}
encoding: PKCS8
size: {{ $root.Values.tls.certManager.certificates.keySize }}
rotationPolicy: Always
usages:
- client auth
dnsNames: []
uris: []
ipAddresses: []
---
{{- end }}

View File

@ -0,0 +1,23 @@
---
{{- /*
ConfigMaps holding per-replica debug hook scripts (a pre/post pair per
master and per tserver replica). The scripts are placeholders that just
echo; inside `range until`, `.` is the replica index.
*/}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "yugabyte.fullname" . }}-master-hooks
  namespace: "{{ .Release.Namespace }}"
data:
{{- range $index := until ( int ( .Values.replicas.master ) ) }}
  yb-master-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' "
  yb-master-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' "
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "yugabyte.fullname" . }}-tserver-hooks
  namespace: "{{ .Release.Namespace }}"
data:
{{- range $index := until ( int ( .Values.replicas.tserver) ) }}
  yb-tserver-{{.}}-pre_debug_hook.sh: "echo 'hello-from-pre' "
  yb-tserver-{{.}}-post_debug_hook.sh: "echo 'hello-from-post' "
{{- end }}
---

View File

@ -0,0 +1,80 @@
{{- /*
Post-install hook Job that configures YSQL/YCQL authentication
credentials (user, password, database/keyspace) by running the
chart-provided setup-credentials.sh. Rendered only when at least one
credential is set under .Values.authCredentials.
*/}}
{{- if or .Values.authCredentials.ycql.user .Values.authCredentials.ycql.password .Values.authCredentials.ycql.keyspace .Values.authCredentials.ysql.password .Values.authCredentials.ysql.user .Values.authCredentials.ysql.database }}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "yugabyte.fullname" . }}-setup-credentials
  namespace: "{{ .Release.Namespace }}"
  labels:
    app: "setup-credentials"
    release: {{ .Release.Name | quote }}
    chart: "{{ .Chart.Name }}"
    component: "{{ .Values.Component }}"
  annotations:
    "helm.sh/hook": post-install
    "helm.sh/hook-weight": "0"
    # Deleted only on success; a failed Job is kept around for debugging.
    "helm.sh/hook-delete-policy": hook-succeeded
spec:
  backoffLimit: 2
  template:
    metadata:
      name: "setup-credentials"
      labels:
        app: "setup-credentials"
        release: {{ .Release.Name | quote }}
        chart: "{{ .Chart.Name }}"
        component: "{{ .Values.Component }}"
    spec:
      restartPolicy: Never
      containers:
      - name: setup-credentials
        image: "{{ .Values.Image.repository }}:{{ .Values.Image.tag }}"
        env:
        # Each credential variable is exported only when the corresponding
        # value is provided; the script reads them to decide what to set up.
        {{- if .Values.authCredentials.ysql.user }}
        - name: YSQL_USER
          value: "{{ .Values.authCredentials.ysql.user }}"
        {{- end }}
        {{- if .Values.authCredentials.ysql.password }}
        - name: YSQL_PASSWORD
          value: "{{ .Values.authCredentials.ysql.password }}"
        {{- end }}
        {{- if .Values.authCredentials.ysql.database }}
        - name: YSQL_DB
          value: "{{ .Values.authCredentials.ysql.database }}"
        {{- end }}
        {{- if .Values.authCredentials.ycql.user }}
        - name: YCQL_USER
          value: "{{ .Values.authCredentials.ycql.user }}"
        {{- end }}
        {{- if .Values.authCredentials.ycql.password }}
        - name: YCQL_PASSWORD
          value: "{{ .Values.authCredentials.ycql.password }}"
        {{- end }}
        {{- if .Values.authCredentials.ycql.keyspace }}
        - name: YCQL_KEYSPACE
          value: "{{ .Values.authCredentials.ycql.keyspace }}"
        {{- end }}
        {{- if .Values.tls.enabled }}
        # Root cert for client->server TLS, from the mounted client secret.
        - name: SSL_CERTFILE
          value: "/root/.yugabytedb/root.crt"
        {{- end }}
        command:
        - 'bash'
        - '/home/yugabyte/bin/setup-credentials/setup-credentials.sh'
        volumeMounts:
        - name: setup-credentials-script
          mountPath: "/home/yugabyte/bin/setup-credentials"
        {{- if .Values.tls.enabled }}
        - name: yugabyte-tls-client-cert
          mountPath: "/root/.yugabytedb/"
        {{- end }}
      volumes:
      - name: setup-credentials-script
        configMap:
          name: {{ include "yugabyte.fullname" . }}-setup-credentials-script
      {{- if .Values.tls.enabled }}
      - name: yugabyte-tls-client-cert
        secret:
          # BUGFIX: the client TLS secret is created under a name that
          # depends on oldNamingStyle; the previously hard-coded
          # "yugabyte-tls-client-cert" failed to mount when
          # oldNamingStyle=false (secret is "<fullname>-client-tls").
          secretName: {{ .Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" .)) }}
          defaultMode: 256  # decimal 256 == mode 0400 (owner read-only)
      {{- end }}
{{- end }}

View File

@ -0,0 +1,51 @@
{{- /*
Prometheus Operator ServiceMonitor for yb-master pods. Rendered only when
both serviceMonitor.enabled and serviceMonitor.master.enabled are true;
it selects the headless master Service (service-type: "headless").
*/}}
{{- if and .Values.serviceMonitor.enabled .Values.serviceMonitor.master.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "yugabyte.fullname" . }}-yb-master
  labels:
    {{- if .Values.oldNamingStyle }}
    app: "yb-master"
    {{- else }}
    app.kubernetes.io/name: "yb-master"
    {{- end }}
    release: {{ .Release.Name | quote }}
    chart: "{{ .Chart.Name }}"
    component: "{{ .Values.Component }}"
    {{- with .Values.serviceMonitor.extraLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  jobLabel: "release"
  selector:
    matchLabels:
      {{- if .Values.oldNamingStyle }}
      app: "yb-master"
      {{- else }}
      app.kubernetes.io/name: "yb-master"
      {{- end }}
      release: {{ .Release.Name | quote }}
      service-type: "headless"
  endpoints:
    {{- with .Values.serviceMonitor.master }}
    {{- if .enabled }}
    - port: {{ .port }}
      path: {{ .path }}
      {{- /* Per-endpoint interval overrides the global scrape interval. */}}
      {{- if .interval }}
      interval: {{ .interval }}
      {{- else }}
      interval: {{ $.Values.serviceMonitor.interval }}
      {{- end }}
      relabelings:
      # Static labels identifying the scrape source for this component.
      - targetLabel: "group"
        replacement: "yb-master"
      - targetLabel: "export_type"
        replacement: "master_export"
      - targetLabel: "node_prefix"
        replacement: {{ $.Release.Name | quote }}
      metricRelabelings:
      {{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }}
    {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,23 @@
{{- /*
Common Service ("yb-tserver-common") selecting all tserver pods of the
release. Rendered only with new-style naming and when
multicluster.createCommonTserverService is true. The port list is reused
from the "yb-tserver-service" entry in .Values.serviceEndpoints.
*/}}
{{- if (and .Values.multicluster.createCommonTserverService (not .Values.oldNamingStyle)) }}
{{- range $service := .Values.serviceEndpoints }}
{{- if eq $service.name "yb-tserver-service" }}
{{- $appLabelArgs := dict "label" $service.app "root" $ -}}
apiVersion: v1
kind: Service
metadata:
  name: "yb-tserver-common"
  labels:
    {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
    {{- include "yugabyte.labels" $ | indent 4 }}
spec:
  ports:
  {{- range $label, $port := $service.ports }}
  - name: {{ $label | quote }}
    port: {{ $port }}
  {{- end }}
  selector:
    {{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }}
---
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- /*
Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export
https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api#exporting-services
*/}}
{{- /*
ServiceExports (Multi-Cluster Services API) for the yb-masters and
yb-tservers Services. The exported names track the Service naming scheme,
which depends on .Values.oldNamingStyle.
*/}}
{{- if .Values.multicluster.createServiceExports }}
apiVersion: {{ .Values.multicluster.mcsApiVersion }}
kind: ServiceExport
metadata:
  name: {{ .Values.oldNamingStyle | ternary "yb-masters" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-masters") | quote }}
  namespace: "{{ .Release.Namespace }}"
  labels:
    {{- include "yugabyte.labels" . | indent 4 }}
---
apiVersion: {{ .Values.multicluster.mcsApiVersion }}
kind: ServiceExport
metadata:
  name: {{ .Values.oldNamingStyle | ternary "yb-tservers" (printf "%s-%s" (include "yugabyte.fullname" .) "yb-tservers") | quote }}
  namespace: "{{ .Release.Namespace }}"
  labels:
    {{- include "yugabyte.labels" . | indent 4 }}
{{ end -}}

View File

@ -0,0 +1,34 @@
{{- /*
Per-pod Services (multicluster.createServicePerPod): one Service per
master/tserver replica, each selecting exactly one pod via the
statefulset.kubernetes.io/pod-name label. The ybc gRPC port is exposed
only when ybc.enabled. Note sprig `ternary` argument order is
(trueValue, falseValue, condition).
*/}}
{{- if .Values.multicluster.createServicePerPod }}
{{- range $server := .Values.Services }}
{{- range $replicaNum := until (int (ternary $.Values.replicas.master $.Values.replicas.tserver (eq $server.name "yb-masters"))) }}
{{- $appLabelArgs := dict "label" $server.label "root" $ -}}
{{- /* Pod name: "<label>-<n>" (old style) or "<fullname>-<label>-<n>". */}}
{{- $podName := $.Values.oldNamingStyle | ternary $server.label (printf "%s-%s" (include "yugabyte.fullname" $) $server.label) -}}
{{- $podName := printf "%s-%d" $podName $replicaNum -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ $podName | quote }}
  labels:
    {{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
    {{- include "yugabyte.labels" $ | indent 4 }}
    service-type: "non-endpoint"
spec:
  ports:
  {{- range $label, $port := $server.ports }}
  {{- if (eq $label "grpc-ybc-port") }}
  {{- if $.Values.ybc.enabled }}
  - name: {{ $label | quote }}
    port: {{ $port }}
  {{- end }}
  {{- else }}
  - name: {{ $label | quote }}
    port: {{ $port }}
  {{- end }}
  {{- end}}
  selector:
    statefulset.kubernetes.io/pod-name: {{ $podName | quote }}
    {{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }}
---
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,7 @@
{{- $root := . -}}
{{- /*
Render Secret objects for the master/tserver secretEnv entries via the
"yugabyte.envsecrets" named template (defined elsewhere in the chart).
*/}}
--- # Create secrets from other namespaces for masters.
{{- $data := dict "secretenv" $.Values.master.secretEnv "root" . "suffix" "master"}}
{{- include "yugabyte.envsecrets" $data }}
--- # Create secrets from other namespaces for tservers.
{{- $data := dict "secretenv" $.Values.tserver.secretEnv "root" . "suffix" "tserver" }}
{{- include "yugabyte.envsecrets" $data }}

View File

@ -0,0 +1,774 @@
{{- $root := . -}}
---
{{- /*
Client TLS Secret for the non-cert-manager path (tls.enabled=true and
tls.certManager.enabled=false). When tls.rootCA.key is supplied, a client
certificate (CN "yugabyte", valid 3650 days) is generated and signed by
the provided root CA; otherwise the tls.rootCA.cert / tls.clientCert
values are emitted as-is and therefore must already be base64-encoded.
*/}}
{{- if and (eq $root.Values.tls.enabled true) (eq $root.Values.tls.certManager.enabled false) }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
  namespace: "{{ $root.Release.Namespace }}"
  labels:
    {{- include "yugabyte.labels" $root | indent 4 }}
type: Opaque
data:
  {{- if $root.Values.tls.rootCA.key }}
  {{- $rootCAClient := buildCustomCert $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key -}}
  {{- $client := genSignedCert "yugabyte" ( default nil ) ( default nil ) 3650 $rootCAClient }}
  root.crt: {{ $rootCAClient.Cert | b64enc }}
  yugabytedb.crt: {{ $client.Cert | b64enc }}
  yugabytedb.key: {{ $client.Key | b64enc }}
  {{- else }}
  root.crt: {{ $root.Values.tls.rootCA.cert }}
  yugabytedb.crt: {{ $root.Values.tls.clientCert.cert }}
  yugabytedb.key: {{ $root.Values.tls.clientCert.key }}
  {{- end }}
---
{{- end }}
---
{{- range .Values.Services }}
{{- $service := . -}}
{{- $appLabelArgs := dict "label" .label "root" $root -}}
{{- $serviceValues := (dict "Service" $service "Values" $root.Values "Chart" $root.Chart "Release" $root.Release) -}}
{{- if and (eq $root.Values.tls.enabled true) (eq $root.Values.tls.certManager.enabled false) }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" $service.label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) $service.label) }}
namespace: "{{ $root.Release.Namespace }}"
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
{{- include "yugabyte.labels" $root | indent 4 }}
type: Opaque
data:
{{- $replicas := (eq .name "yb-masters") | ternary $root.Values.replicas.master $root.Values.replicas.tserver -}}
{{- range $index := until ( int ( $replicas ) ) }}
{{- $nodeOldStyle := printf "%s-%d.%s.%s.svc.%s" $service.label $index $service.name $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle := printf "%s-%s-%d.%s-%s.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace $root.Values.domainName }}
{{- if $root.Values.multicluster.createServiceExports -}}
{{- $nodeOldStyle = printf "%s-%d.%s.%s.%s.svc.clusterset.local" $service.label $index $root.Values.multicluster.kubernetesClusterId $service.name $root.Release.Namespace }}
{{- $nodeNewStyle = printf "%s-%s-%d.%s.%s-%s.%s.svc.clusterset.local" (include "yugabyte.fullname" $root) $service.label $index $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }}
{{- end -}}
{{- if $root.Values.multicluster.createServicePerPod -}}
{{- $nodeOldStyle = printf "%s-%d.%s.svc.%s" $service.label $index $root.Release.Namespace $root.Values.domainName }}
{{- $nodeNewStyle = printf "%s-%s-%d.%s.svc.%s" (include "yugabyte.fullname" $root) $service.label $index $root.Release.Namespace $root.Values.domainName }}
{{- end -}}
{{- $node := $root.Values.oldNamingStyle | ternary $nodeOldStyle $nodeNewStyle }}
{{- if $root.Values.tls.rootCA.key }}
{{- $dns1 := printf "*.%s-%s.%s" (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }}
{{- $dns2 := printf "%s.svc.%s" $dns1 $root.Values.domainName }}
{{- if $root.Values.multicluster.createServiceExports -}}
{{- $dns1 = printf "*.%s.%s-%s.%s.svc.clusterset.local" $root.Values.multicluster.kubernetesClusterId (include "yugabyte.fullname" $root) $service.name $root.Release.Namespace }}
{{- end -}}
{{- if $root.Values.multicluster.createServicePerPod -}}
{{- $dns1 = printf "*.%s.svc.%s" $root.Release.Namespace $root.Values.domainName }}
{{- end -}}
{{- $rootCA := buildCustomCert $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key -}}
{{- $server := genSignedCert $node ( default nil ) (list $node $dns1 $dns2 ) 3650 $rootCA }}
node.{{$node}}.crt: {{ $server.Cert | b64enc }}
node.{{$node}}.key: {{ $server.Key | b64enc }}
{{- else }}
node.{{$node}}.crt: {{ $root.Values.tls.nodeCert.cert }}
node.{{$node}}.key: {{ $root.Values.tls.nodeCert.key }}
{{- end }}
{{- end }}
ca.crt: {{ $root.Values.tls.rootCA.cert }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $root.Values.oldNamingStyle | ternary .name (printf "%s-%s" (include "yugabyte.fullname" $root) .name) | quote }}
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
{{- include "yugabyte.labels" $root | indent 4 }}
service-type: "headless"
spec:
clusterIP: None
ports:
{{- range $label, $port := .ports }}
{{- if (eq $label "grpc-ybc-port") }}
{{- if $root.Values.ybc.enabled }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- else }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end }}
{{- end}}
selector:
{{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }}
{{- if $root.Values.enableLoadBalancer }}
{{- range $endpoint := $root.Values.serviceEndpoints }}
{{- if eq $service.label $endpoint.app }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $root.Values.oldNamingStyle | ternary $endpoint.name (printf "%s-%s" (include "yugabyte.fullname" $root) $endpoint.name) | quote }}
annotations:
{{ toYaml $endpoint.annotations | indent 4 }}
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
{{- include "yugabyte.labels" $root | indent 4 }}
service-type: "endpoint"
spec:
{{- if eq $root.Release.Service "Tiller" }}
clusterIP:
{{- else }}
{{- if $endpoint.clusterIP }}
clusterIP: {{ $endpoint.clusterIP }}
{{- end }}
{{- end }}
ports:
{{- range $label, $port := $endpoint.ports }}
- name: {{ $label | quote }}
port: {{ $port }}
{{- end}}
selector:
{{- include "yugabyte.appselector" ($appLabelArgs) | indent 4 }}
type: {{ $endpoint.type }}
externalTrafficPolicy: {{ $endpoint.externalTrafficPolicy | default "Cluster" }}
{{- if $endpoint.loadBalancerIP }}
loadBalancerIP: {{ $endpoint.loadBalancerIP }}
{{- end }}
{{- end}}
{{- end}}
{{- end}}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ $root.Values.oldNamingStyle | ternary .label (printf "%s-%s" (include "yugabyte.fullname" $root) .label) | quote }}
namespace: "{{ $root.Release.Namespace }}"
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 4 }}
{{- include "yugabyte.labels" $root | indent 4 }}
{{- if $root.Values.statefulSetAnnotations }}
annotations:
{{ toYaml $root.Values.statefulSetAnnotations | indent 4 }}
{{- end }}
spec:
serviceName: {{ $root.Values.oldNamingStyle | ternary .name (printf "%s-%s" (include "yugabyte.fullname" $root) .name) | quote }}
podManagementPolicy: {{ $root.Values.PodManagementPolicy }}
{{ if eq .name "yb-masters" }}
replicas: {{ $root.Values.replicas.master }}
{{ else }}
replicas: {{ $root.Values.replicas.tserver }}
{{ end }}
{{- $storageInfo := (eq .name "yb-masters") | ternary $root.Values.storage.master $root.Values.storage.tserver -}}
{{ if not $root.Values.storage.ephemeral }}
volumeClaimTemplates:
{{- range $index := until (int ($storageInfo.count )) }}
- metadata:
name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }}
{{- if $root.Values.legacyVolumeClaimAnnotations }}
annotations:
volume.beta.kubernetes.io/storage-class: {{ $storageInfo.storageClass | quote }}
{{- end }}
labels:
{{- include "yugabyte.labels" $root | indent 10 }}
spec:
accessModes:
- "ReadWriteOnce"
{{- if $storageInfo.storageClass }}
storageClassName: {{ $storageInfo.storageClass }}
{{- end }}
resources:
requests:
storage: {{ $storageInfo.size }}
{{- end }}
{{- end }}
updateStrategy:
type: RollingUpdate
rollingUpdate:
{{ if eq .name "yb-masters" }}
partition: {{ $root.Values.partition.master }}
{{ else }}
partition: {{ $root.Values.partition.tserver }}
{{ end }}
selector:
matchLabels:
{{- include "yugabyte.appselector" ($appLabelArgs) | indent 6 }}
template:
metadata:
{{- if eq .name "yb-masters" }}
{{- if (or $root.Values.networkAnnotation $root.Values.master.podAnnotations $root.Values.tls.enabled) }}
annotations:
{{- with $root.Values.networkAnnotation }}{{ toYaml . | nindent 8 }}{{ end }}
{{- with $root.Values.master.podAnnotations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- if $root.Values.tls.enabled }}
checksum/rootCA: {{ cat $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key | sha256sum }}
{{- end }}
{{- end }}
{{- else }}
{{- if (or $root.Values.networkAnnotation $root.Values.tserver.podAnnotations $root.Values.tls.enabled) }}
annotations:
{{- with $root.Values.networkAnnotation }}{{ toYaml . | nindent 8 }}{{ end }}
{{- with $root.Values.tserver.podAnnotations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- if $root.Values.tls.enabled }}
checksum/rootCA: {{ cat $root.Values.tls.rootCA.cert $root.Values.tls.rootCA.key | sha256sum }}
{{- end }}
{{- end }}
{{- end }}
labels:
{{- include "yugabyte.applabel" ($appLabelArgs) | indent 8 }}
{{- include "yugabyte.labels" $root | indent 8 }}
{{- if $root.Values.istioCompatibility.enabled }}
sidecar.istio.io/inject: "true"
{{- end }}
{{- if eq .name "yb-masters" }}
{{- with $root.Values.master.podLabels }}{{ toYaml . | nindent 8 }}{{ end }}
{{- else }}
{{- with $root.Values.tserver.podLabels }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
spec:
{{- if $root.Values.Image.pullSecretName }}
imagePullSecrets:
- name: {{ $root.Values.Image.pullSecretName }}
{{ end }}
{{- if $root.Values.podSecurityContext.enabled }}
securityContext: {{- omit $root.Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if $root.Values.nodeSelector }}
nodeSelector:
{{ toYaml $root.Values.nodeSelector | indent 8 }}
{{- end }}
{{- if eq .name "yb-masters" }} # yb-masters
{{- with $root.Values.master.serviceAccount }}
serviceAccountName: {{ . }}
{{- end }}
{{- if $root.Values.master.tolerations }}
tolerations:
{{- with $root.Values.master.tolerations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
{{- else }} # yb-tservers
{{- with $root.Values.tserver.serviceAccount }}
serviceAccountName: {{ . }}
{{- end }}
{{- if $root.Values.tserver.tolerations }}
tolerations:
{{- with $root.Values.tserver.tolerations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
{{- end }}
terminationGracePeriodSeconds: 300
affinity:
# Set the anti-affinity selector scope to YB masters and tservers.
{{- $nodeAffinityData := dict}}
{{- if eq .name "yb-masters" -}}
{{- $nodeAffinityData = get $root.Values.master.affinity "nodeAffinity" | default (dict) -}}
{{- else -}}
{{- $nodeAffinityData = get $root.Values.tserver.affinity "nodeAffinity" | default (dict) -}}
{{- end -}}
{{ if $root.Values.AZ }}
{{- $userSelectorTerms := dig "requiredDuringSchedulingIgnoredDuringExecution" "nodeSelectorTerms" "" $nodeAffinityData | default (list) -}}
{{- $baseAffinity := include "yugabyte.multiAZNodeAffinity" $root | fromYaml -}}
{{- $requiredSchedule := (list) -}}
{{- if $userSelectorTerms -}}
{{- range $userSelectorTerms -}}
{{- $userTerm := . -}}
{{- range $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}}
{{- $matchExpr := concat .matchExpressions $userTerm.matchExpressions | dict "matchExpressions" -}}
{{- $requiredSchedule = mustMerge $matchExpr $userTerm | append $requiredSchedule -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- $requiredSchedule = $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -}}
{{- end -}}
{{- with $baseAffinity.requiredDuringSchedulingIgnoredDuringExecution -}}
{{- $_ := set . "nodeSelectorTerms" $requiredSchedule -}}
{{- end -}}
{{- $nodeAffinityData = mustMerge $baseAffinity $nodeAffinityData -}}
{{- end -}}
{{- $podAntiAffinityData := dict -}}
{{- $basePodAntiAffinity := include "yugabyte.podAntiAffinity" ($appLabelArgs) | fromYaml -}}
{{- if eq .name "yb-masters" -}}
{{- with $root.Values.master.affinity -}}
{{- $userPodAntiAffinity := get . "podAntiAffinity" | default (dict) -}}
{{- if $userPodAntiAffinity -}}
{{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}}
{{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}}
{{- end -}}
{{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}}
{{- end -}}
{{- else -}}
{{- with $root.Values.tserver.affinity -}}
{{- $userPodAntiAffinity := get . "podAntiAffinity" | default (dict) -}}
{{- if $userPodAntiAffinity -}}
{{- $preferredList := dig "preferredDuringSchedulingIgnoredDuringExecution" "" $userPodAntiAffinity | default (list) | concat $basePodAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution}}
{{- $_ := set $basePodAntiAffinity "preferredDuringSchedulingIgnoredDuringExecution" $preferredList -}}
{{- end -}}
{{- $podAntiAffinityData = mustMerge $basePodAntiAffinity $userPodAntiAffinity -}}
{{- end -}}
{{- end -}}
{{- if eq .name "yb-masters" -}}
{{- if $nodeAffinityData -}}
{{- $_ := set $root.Values.master.affinity "nodeAffinity" $nodeAffinityData -}}
{{- end -}}
{{- $_ := set $root.Values.master.affinity "podAntiAffinity" $podAntiAffinityData -}}
{{ toYaml $root.Values.master.affinity | nindent 8 }}
{{- else -}}
{{- if $nodeAffinityData -}}
{{- $_ := set $root.Values.tserver.affinity "nodeAffinity" $nodeAffinityData -}}
{{- end -}}
{{- $_ := set $root.Values.tserver.affinity "podAntiAffinity" $podAntiAffinityData -}}
{{ toYaml $root.Values.tserver.affinity | nindent 8 }}
{{ end }}
{{- with $root.Values.dnsConfig }}
dnsConfig: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with $root.Values.dnsPolicy }}
dnsPolicy: {{ . | quote }}
{{- end }}
containers:
- name: "{{ .label }}"
image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}"
imagePullPolicy: {{ $root.Values.Image.pullPolicy }}
lifecycle:
postStart:
exec:
command:
- "bash"
- "-c"
- >
mkdir -p /mnt/disk0/cores;
mkdir -p /mnt/disk0/yb-data/scripts;
if [ ! -f /mnt/disk0/yb-data/scripts/log_cleanup.sh ]; then
if [ -f /home/yugabyte/bin/log_cleanup.sh ]; then
cp /home/yugabyte/bin/log_cleanup.sh /mnt/disk0/yb-data/scripts;
fi;
fi
{{- if (and (not $root.Values.storage.ephemeral) (not $service.skipHealthChecks)) }}
livenessProbe:
exec:
command:
- bash
- -v
- -c
- |
{{- include "yugabyte.fs_data_dirs_probe" $storageInfo | nindent 14 }};
exit_code="$?";
echo "disk check exited with: ${exit_code}";
exit "${exit_code}"
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
{{- end }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: YBDEVOPS_CORECOPY_DIR
value: "/mnt/disk0/cores"
{{- if eq .name "yb-masters" }}
{{- with $root.Values.master.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }}
{{- $data := dict "secretenv" $root.Values.master.secretEnv "root" $root "suffix" "master"}}
{{- include "yugabyte.addenvsecrets" $data | nindent 8 }}
{{- else }}
{{- with $root.Values.tserver.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }}
{{- $data := dict "secretenv" $root.Values.tserver.secretEnv "root" $root "suffix" "tserver" }}
{{- include "yugabyte.addenvsecrets" $data | nindent 8 }}
{{- end }}
{{- if and $root.Values.tls.enabled $root.Values.tls.clientToServer (ne .name "yb-masters") }}
- name: SSL_CERTFILE
value: /root/.yugabytedb/root.crt
{{- end }}
resources:
{{- if eq .name "yb-masters" }}
{{ toYaml $root.Values.resource.master | indent 10 }}
{{ else }}
{{ toYaml $root.Values.resource.tserver | indent 10 }}
{{ end }}
# core dumps are collected to workingDir if
# kernel.core_pattern is set to a relative path like
# core.%e.%p.%t ref:
# https://github.com/yugabyte/charts/issues/11
workingDir: "/mnt/disk0/cores"
command:
- "/sbin/tini"
- "--"
args:
- "/bin/bash"
- "-c"
- |
{{- if and (not $root.Values.preflight.skipUlimit) (not $root.Values.preflight.skipAll) }}
if [ -f /home/yugabyte/tools/k8s_preflight.py ]; then
/home/yugabyte/tools/k8s_preflight.py all
fi && \
{{- end }}
{{- if (and (not $root.Values.storage.ephemeral) (not $root.Values.preflight.skipAll)) }}
{{- include "yugabyte.fs_data_dirs_probe" $storageInfo | nindent 12 }} && \
{{- end }}
{{- $rpcAddr := include "yugabyte.rpc_bind_address" $serviceValues -}}
{{- $rpcPort := index $service.ports "tcp-rpc-port" -}}
{{- $rpcDict := dict "Addr" $rpcAddr "Port" $rpcPort -}}
{{- $rpcPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $rpcDict) -}}
{{- if $rpcPreflight -}}{{ $rpcPreflight | nindent 12 }}{{ end -}}
{{- $broadcastAddr := include "yugabyte.server_broadcast_address" $serviceValues -}}
{{/* skip bind check for servicePerPod multi-cluster, we cannot/don't bind to service IP */}}
{{- if not $root.Values.multicluster.createServicePerPod }}
{{- $broadcastPort := index $service.ports "tcp-rpc-port" -}}
{{- $broadcastDict := dict "Addr" $broadcastAddr "Port" $broadcastPort -}}
{{- $broadcastPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $broadcastDict) -}}
{{- if $broadcastPreflight -}}{{ $broadcastPreflight | nindent 12 }}{{ end -}}
{{- end }}
{{- $webserverAddr := include "yugabyte.webserver_interface" $serviceValues -}}
{{- $webserverPort := index $service.ports "http-ui" -}}
{{- $webserverDict := dict "Addr" $webserverAddr "Port" $webserverPort -}}
{{- $webserverPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $webserverDict) -}}
{{- if $webserverPreflight -}}{{ $webserverPreflight | nindent 12 }}{{ end }}
if [[ -f /home/yugabyte/tools/k8s_parent.py ]]; then
k8s_parent="/home/yugabyte/tools/k8s_parent.py"
else
k8s_parent=""
fi && \
{{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }}
echo "Creating ephemeral /opt/certs/yugabyte/ as symlink to persisted /mnt/disk0/certs/" && \
mkdir -p /mnt/disk0/certs && \
mkdir -p /opt/certs && \
ln -s /mnt/disk0/certs /opt/certs/yugabyte && \
if [[ ! -f /opt/certs/yugabyte/ca.crt ]]; then
echo "Fresh install of /opt/certs/yugabyte/ca.crt"
cp /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt;
fi && \
cmp -s /home/yugabyte/cert-manager/ca.crt /opt/certs/yugabyte/ca.crt;sameRootCA=$? && \
if [[ $sameRootCA -eq 0 ]]; then
echo "Refreshing tls certs at /opt/certs/yugabyte/";
cp /home/yugabyte/cert-manager/tls.crt /opt/certs/yugabyte/node.{{$rpcAddr}}.crt;
cp /home/yugabyte/cert-manager/tls.key /opt/certs/yugabyte/node.{{$rpcAddr}}.key;
chmod 600 /opt/certs/yugabyte/*
else
echo "WARNING: Not refreshing certificates as the root ca.crt has changed"
fi && \
{{- end }}
{{- if eq .name "yb-masters" }}
exec ${k8s_parent} /home/yugabyte/bin/yb-master \
{{- if not $root.Values.storage.ephemeral }}
--fs_data_dirs={{ template "yugabyte.fs_data_dirs" $storageInfo }} \
{{- else }}
--fs_data_dirs=/var/yugabyte \
{{- end }}
{{- if eq $root.Values.ip_version_support "v6_only" }}
--net_address_filter=ipv6_external,ipv6_non_link_local,ipv6_all,ipv4_external,ipv4_all \
{{- end }}
{{- if $root.Values.isMultiAz }}
--master_addresses={{ $root.Values.masterAddresses }} \
--replication_factor={{ $root.Values.replicas.totalMasters }} \
{{- else }}
--master_addresses={{ template "yugabyte.master_addresses" $root }} \
--replication_factor={{ $root.Values.replicas.master }} \
{{- end }}
{{- if not $root.Values.disableYsql }}
--enable_ysql=true \
{{- else }}
--enable_ysql=false \
{{- end }}
--metric_node_name=$(HOSTNAME) \
--memory_limit_hard_bytes={{ template "yugabyte.memory_hard_limit" $root.Values.resource.master }} \
--stderrthreshold=0 \
--num_cpus={{ ceil $root.Values.resource.master.requests.cpu }} \
--undefok=num_cpus,enable_ysql \
{{- range $flag, $override := $root.Values.gflags.master }}
--{{ $flag }}={{ quote $override }} \
{{- end }}
{{- if $root.Values.tls.enabled }}
--certs_dir=/opt/certs/yugabyte \
--use_node_to_node_encryption={{ $root.Values.tls.nodeToNode }} \
--allow_insecure_connections={{ $root.Values.tls.insecure }} \
{{- end }}
--rpc_bind_addresses={{ $rpcAddr }} \
--server_broadcast_addresses={{ $broadcastAddr }} \
--webserver_interface={{ $webserverAddr }}
{{- else }}
{{- $cqlAddr := include "yugabyte.cql_proxy_bind_address" $serviceValues -}}
{{- $cqlPort := index $service.ports "tcp-yql-port" -}}
{{- $cqlDict := dict "Addr" $cqlAddr "Port" $cqlPort -}}
{{- $cqlPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $cqlDict) -}}
{{- if $cqlPreflight -}}{{ $cqlPreflight | nindent 12 }}{{ end -}}
{{- $ysqlAddr := include "yugabyte.pgsql_proxy_bind_address" $serviceValues -}}
{{- $ysqlPort := index $service.ports "tcp-ysql-port" -}}
{{- if not $root.Values.disableYsql -}}
{{- $ysqlDict := dict "Addr" $ysqlAddr "Port" $ysqlPort -}}
{{- $ysqlPreflight := include "yugabyte.preflight_check" (set $serviceValues "Preflight" $ysqlDict) -}}
{{- if $ysqlPreflight -}}{{ $ysqlPreflight | nindent 12 }}{{ end -}}
{{- end }}
exec ${k8s_parent} /home/yugabyte/bin/yb-tserver \
{{- if not $root.Values.storage.ephemeral }}
--fs_data_dirs={{ template "yugabyte.fs_data_dirs" $storageInfo }} \
{{- else }}
--fs_data_dirs=/var/yugabyte \
{{- end }}
{{- if eq $root.Values.ip_version_support "v6_only" }}
--net_address_filter=ipv6_external,ipv6_non_link_local,ipv6_all,ipv4_external,ipv4_all \
{{- end }}
{{- if $root.Values.isMultiAz }}
--tserver_master_addrs={{ $root.Values.masterAddresses }} \
{{- else }}
--tserver_master_addrs={{ template "yugabyte.master_addresses" $root }} \
{{- end }}
--metric_node_name=$(HOSTNAME) \
--memory_limit_hard_bytes={{ template "yugabyte.memory_hard_limit" $root.Values.resource.tserver }} \
--stderrthreshold=0 \
--num_cpus={{ ceil $root.Values.resource.tserver.requests.cpu }} \
--undefok=num_cpus,enable_ysql \
--use_node_hostname_for_local_tserver=true \
{{- if $root.Values.authCredentials.ysql.password }}
--ysql_enable_auth=true \
{{- end }}
{{- if or $root.Values.authCredentials.ycql.user $root.Values.authCredentials.ycql.password }}
--use_cassandra_authentication=true \
{{- end }}
{{- range $flag, $override := $root.Values.gflags.tserver }}
--{{ $flag }}={{ quote $override }} \
{{- end }}
{{- if $root.Values.tls.enabled }}
--certs_dir=/opt/certs/yugabyte \
--use_node_to_node_encryption={{ $root.Values.tls.nodeToNode }} \
--allow_insecure_connections={{ $root.Values.tls.insecure }} \
--use_client_to_server_encryption={{ $root.Values.tls.clientToServer }} \
--certs_for_client_dir=/opt/certs/yugabyte \
{{- if $root.Values.tserver.serverBroadcastAddress }}
--cert_node_filename={{ include "yugabyte.server_fqdn" $serviceValues }} \
{{- end }}
{{- end }}
--rpc_bind_addresses={{ $rpcAddr }} \
--server_broadcast_addresses={{ $root.Values.tserver.serverBroadcastAddress | default $broadcastAddr }} \
--webserver_interface={{ $webserverAddr }} \
{{- if not $root.Values.disableYsql }}
--enable_ysql=true \
--pgsql_proxy_bind_address={{ $ysqlAddr }} \
{{- else }}
--enable_ysql=false \
{{- end }}
--cql_proxy_bind_address={{ $cqlAddr }}
{{- end }}
ports:
{{- range $label, $port := .ports }}
{{- if not (eq $label "grpc-ybc-port") }}
- containerPort: {{ $port }}
name: {{ $label | quote }}
{{- end }}
{{- end}}
volumeMounts:
{{- if (eq .name "yb-tservers") }}
- name: tserver-tmp
mountPath: /tmp
{{- end }}
- name: debug-hooks-volume
mountPath: /opt/debug_hooks_config
{{ if not $root.Values.storage.ephemeral }}
{{- range $index := until (int ($storageInfo.count)) }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }}
mountPath: /mnt/disk{{ $index }}
{{- end }}
{{- end }}
{{- if $root.Values.tls.enabled }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }}
readOnly: true
- name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
mountPath: /root/.yugabytedb/
readOnly: true
{{- end }}
{{- if and (eq .name "yb-masters") ($root.Values.master.extraVolumeMounts) -}}
{{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.master -}}
{{- $root.Values.master.extraVolumeMounts | toYaml | nindent 10 -}}
{{- else if and (eq .name "yb-tservers") ($root.Values.tserver.extraVolumeMounts) -}}
{{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}}
{{- $root.Values.tserver.extraVolumeMounts | toYaml | nindent 10 -}}
{{- end -}}
{{ if not $root.Values.storage.ephemeral }}
- name: yb-cleanup
image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}"
imagePullPolicy: {{ $root.Values.Image.pullPolicy }}
env:
- name: USER
value: "yugabyte"
command:
- "/sbin/tini"
- "--"
args:
- "/bin/bash"
- "-c"
- >
while true; do
sleep 3600;
/home/yugabyte/scripts/log_cleanup.sh;
done
volumeMounts:
- name: {{ $root.Values.oldNamingStyle | ternary "datadir0" (printf "%s0" (include "yugabyte.volume_name" $root)) }}
mountPath: /home/yugabyte/
subPath: yb-data
- name: {{ $root.Values.oldNamingStyle | ternary "datadir0" (printf "%s0" (include "yugabyte.volume_name" $root)) }}
mountPath: /var/yugabyte/cores
subPath: cores
{{- if $root.Values.ybCleanup.resources }}
resources: {{ toYaml $root.Values.ybCleanup.resources | nindent 10 }}
{{- end }}
{{- end }}
{{- if and (eq .name "yb-tservers") ($root.Values.ybc.enabled) }}
- name: yb-controller
image: "{{ $root.Values.Image.repository }}:{{ $root.Values.Image.tag }}"
imagePullPolicy: {{ $root.Values.Image.pullPolicy }}
lifecycle:
postStart:
exec:
command:
- "bash"
- "-c"
- >
mkdir -p /mnt/disk0/yw-data/controller/tmp;
mkdir -p /mnt/disk0/yw-data/controller/conf;
mkdir -p /mnt/disk0/ybc-data/controller/logs;
mkdir -p /tmp/yugabyte/controller;
ln -sf /mnt/disk0/ybc-data/controller/logs /tmp/yugabyte/controller;
ln -sf /mnt/disk0/yw-data/controller/bin /tmp/yugabyte/controller;
rm -f /tmp/yugabyte/controller/yb-controller.pid;
{{- if and $root.Values.tls.enabled $root.Values.tls.certManager.enabled }}
mkdir -p /opt/certs;
ln -sf /mnt/disk0/certs /opt/certs/yugabyte;
{{- end }}
command:
- "/sbin/tini"
- "--"
args:
- "/bin/bash"
- "-c"
- >
while true; do
sleep 60;
/home/yugabyte/tools/k8s_ybc_parent.py status || /home/yugabyte/tools/k8s_ybc_parent.py start;
done
{{- with index $service.ports "grpc-ybc-port" }}
ports:
- containerPort: {{ . }}
name: "grpc-ybc-port"
{{- end }}
volumeMounts:
- name: tserver-tmp
mountPath: /tmp
{{- if not $root.Values.storage.ephemeral }}
{{- range $index := until (int ($storageInfo.count)) }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }}
mountPath: /mnt/disk{{ $index }}
{{- end }}
{{- end }}
{{- if $root.Values.tls.enabled }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
mountPath: {{ $root.Values.tls.certManager.enabled | ternary "/home/yugabyte/cert-manager" "/opt/certs/yugabyte" }}
readOnly: true
{{- end }}
{{- if ($root.Values.tserver.extraVolumeMounts) -}}
{{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}}
{{- $root.Values.tserver.extraVolumeMounts | toYaml | nindent 10 -}}
{{- end -}}
{{- if $root.Values.ybc.resources }}
resources: {{ toYaml $root.Values.ybc.resources | nindent 10 }}
{{- end }}
{{- end}}
volumes:
{{- if (eq .name "yb-masters") }}
- name: debug-hooks-volume
configMap:
name: {{ include "yugabyte.fullname" $root }}-master-hooks
defaultMode: 0755
{{- else if (eq .name "yb-tservers") }}
- name: debug-hooks-volume
configMap:
name: {{ include "yugabyte.fullname" $root }}-tserver-hooks
defaultMode: 0755
- name: tserver-tmp
emptyDir: {}
{{- end }}
{{ if not $root.Values.storage.ephemeral }}
{{- range $index := until (int ($storageInfo.count)) }}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "datadir%d" $index) (printf "%s%d" (include "yugabyte.volume_name" $root) $index) }}
hostPath:
path: /mnt/disks/ssd{{ $index }}
{{- end }}
{{- end }}
{{- if $root.Values.tls.enabled }}
{{- if $root.Values.tls.certManager.enabled }}
{{- /* certManager enabled */}}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
projected:
sources:
{{- if not $root.Values.tls.certManager.bootstrapSelfsigned }}
- secret:
name: {{ printf "%s-root-ca" (include "yugabyte.fullname" $root) }}
{{- end }}
- secret:
name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
{{- else }}
{{/* certManager disabled */}}
- name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
secret:
secretName: {{ $root.Values.oldNamingStyle | ternary (printf "%s-yugabyte-tls-cert" .label) (printf "%s-%s-tls-cert" (include "yugabyte.fullname" $root) .label) }}
defaultMode: 256
{{- end }}
- name: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
secret:
secretName: {{ $root.Values.oldNamingStyle | ternary "yugabyte-tls-client-cert" (printf "%s-client-tls" (include "yugabyte.fullname" $root)) }}
{{- if $root.Values.tls.certManager.enabled }}
items:
- key: ca.crt
path: root.crt
- key: tls.crt
path: yugabytedb.crt
- key: tls.key
path: yugabytedb.key
{{- end }}
defaultMode: 256
{{- end }}
{{- if and (eq .name "yb-masters") ($root.Values.master.extraVolumes) -}}
{{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.master -}}
{{- $root.Values.master.extraVolumes | toYaml | nindent 8 -}}
{{- else if and (eq .name "yb-tservers") ($root.Values.tserver.extraVolumes) -}}
{{- include "yugabyte.isExtraVolumesMappingExists" $root.Values.tserver -}}
{{- $root.Values.tserver.extraVolumes | toYaml | nindent 8 -}}
{{- end -}}
{{- if eq $root.Values.isMultiAz false }}
---
{{/*
TODO: switch to policy/v1 completely when we stop supporting
Kubernetes versions < 1.21
*/}}
{{- if $root.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ $root.Values.oldNamingStyle | ternary (printf "%s-pdb" .label) (printf "%s-%s-pdb" (include "yugabyte.fullname" $root) .name) }}
spec:
maxUnavailable: {{ template "yugabyte.max_unavailable_for_quorum" $root }}
selector:
matchLabels:
{{- include "yugabyte.appselector" ($appLabelArgs) | indent 6 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,259 @@
{{- if or .Values.authCredentials.ycql.user .Values.authCredentials.ycql.password .Values.authCredentials.ycql.keyspace .Values.authCredentials.ysql.password .Values.authCredentials.ysql.user .Values.authCredentials.ysql.database }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "yugabyte.fullname" . }}-setup-credentials-script
namespace: "{{ .Release.Namespace }}"
labels:
release: {{ .Release.Name | quote }}
chart: "{{ .Chart.Name }}"
component: "{{ .Values.Component }}"
data:
setup-credentials.sh: |
#!/bin/bash
set -eo pipefail
# Setup script to setup credentials
# -----------------------------------------
# Default Variables
# -----------------------------------------
readonly DEFAULT_YSQL_USER="yugabyte"
readonly DEFAULT_YSQL_PASSWORD="yugabyte"
readonly DEFAULT_YSQL_DB="yugabyte"
readonly DEFAULT_YCQL_USER="cassandra"
readonly DEFAULT_YCQL_PASSWORD="cassandra"
{{- range .Values.Services }}
{{- $service := . -}}
{{- if eq ($service.name) "yb-tservers" }}
readonly YSQL_PORT={{ index $service.ports "tcp-ysql-port" }}
# TODO: Update the tcp-yql-port to tcp-ycql-port in values.yaml
readonly YCQL_PORT={{ index $service.ports "tcp-yql-port" }}
{{- end }}
{{- end }}
prefix_ysql_cmd=(
/home/yugabyte/bin/ysqlsh -h yb-tservers.{{ .Release.Namespace }}.svc.{{ .Values.domainName }}
-p "$YSQL_PORT"
)
prefix_ycql_cmd=(
/home/yugabyte/bin/ycqlsh yb-tservers.{{ .Release.Namespace }}.svc.{{ .Values.domainName }}
"$YCQL_PORT"
)
{{- if .Values.tls.enabled }}
prefix_ysql_cmd+=("sslmode=require")
prefix_ycql_cmd+=(--ssl)
{{- end }}
# -----------------------------------------
# Variables
# -----------------------------------------
ysql_user=
ysql_password=
ysql_db=
ycql_user=
ycql_password=
ycql_keyspace=
# -----------------------------------------
# Helper functions
# -----------------------------------------
# EXIT trap handler (registered in the main section via `trap cleanup EXIT`):
# logs the script's exit status and propagates it unchanged.
cleanup() {
local exit_code=$?
echo "Exiting with code $exit_code"
exit "$exit_code"
}
# Block until ysqlsh can connect to the yb-tservers service.
#   $1 = user, $2 = password, $3 = YSQL port, $4 = "true" to append sslmode=require.
# Polls every 5s forever; the caller bounds it with `timeout 3m` (see main section).
# Exported below so it can be invoked via `bash -c` under `timeout`.
function waitUntilHealthy() {
declare -a ysql_cmd
# ysqlsh reads the password from PGPASSWORD rather than a CLI flag.
export PGPASSWORD="$2"
ysql_cmd=(
/home/yugabyte/bin/ysqlsh -h yb-tservers.{{ .Release.Namespace }}.svc.{{ .Values.domainName }}
-p "$3"
-U "$1"
-c "\\conninfo"
)
if [[ "$4" == "true" ]]; then
ysql_cmd+=("sslmode=require")
fi
echo "${ysql_cmd[@]}"
while ! "${ysql_cmd[@]}"; do
sleep 5s
done
}
export -f waitUntilHealthy
# Resolve YSQL credentials from the environment (YSQL_USER / YSQL_PASSWORD /
# YSQL_DB), falling back to the defaults declared at the top of the script.
# Sets globals: ysql_user, ysql_password, ysql_db, and api="ysql" (the `api`
# global selects the branch taken by create_user/update_password/create_container).
get_ysql_credentials() {
[[ -n "$YSQL_USER" ]] && ysql_user="$YSQL_USER" || ysql_user="$DEFAULT_YSQL_USER"
[[ -n "$YSQL_PASSWORD" ]] && ysql_password="$YSQL_PASSWORD"
# For a non-default user with no password supplied, the username doubles as
# the password (deliberate fallback; mirrored in get_ycql_credentials).
if [[ -z "$YSQL_PASSWORD" ]] && [[ "$ysql_user" != "$DEFAULT_YSQL_USER" ]]; then
ysql_password="$YSQL_USER"
fi
[[ -n "$YSQL_DB" ]] && ysql_db="$YSQL_DB"
# No explicit DB but a custom user: name the DB after the user.
[[ -z "$YSQL_DB" ]] && [[ -n "$YSQL_USER" ]] && ysql_db="$ysql_user"
api="ysql"
}
# Resolve YCQL credentials from the environment (YCQL_USER / YCQL_PASSWORD /
# YCQL_KEYSPACE), falling back to the defaults declared at the top of the
# script. Sets globals: ycql_user, ycql_password, ycql_keyspace, api="ycql".
get_ycql_credentials() {
[[ -n "$YCQL_USER" ]] && ycql_user="$YCQL_USER" || ycql_user="$DEFAULT_YCQL_USER"
[[ -n "$YCQL_PASSWORD" ]] && ycql_password="$YCQL_PASSWORD"
# For a non-default user with no password supplied, the username doubles as
# the password (same fallback as get_ysql_credentials).
if [[ -z "$YCQL_PASSWORD" ]] && [[ "$ycql_user" != "$DEFAULT_YCQL_USER" ]]; then
ycql_password="$YCQL_USER"
fi
[[ -n "$YCQL_KEYSPACE" ]] && ycql_keyspace="$YCQL_KEYSPACE"
# No explicit keyspace but a custom user: name the keyspace after the user.
[[ -z "$YCQL_KEYSPACE" ]] && [[ -n "$YCQL_USER" ]] && ycql_keyspace="$ycql_user"
api="ycql"
}
# Create the requested superuser role using the default admin credentials.
# Branch is selected by the global `api` ("ysql" or "ycql") set by the
# get_*_credentials functions; role/password come from the matching globals.
# YSQL additionally transfers ownership of ${ysql_db} to the new role.
create_user() {
declare -a ysql_cmd
declare -a ycql_cmd
case "$api" in
"ysql")
export PGPASSWORD="$DEFAULT_YSQL_PASSWORD"
# NOTE(review): re-splitting the prefix array with `read -a` assumes no
# element contains whitespace — holds for the host/port args built above.
read -r -a ysql_cmd <<< "${prefix_ysql_cmd[@]}"
ysql_cmd+=(
-U "$DEFAULT_YSQL_USER"
-c "CREATE ROLE ${ysql_user} with LOGIN SUPERUSER password '${ysql_password}' ;"
-c "ALTER DATABASE ${ysql_db} OWNER TO ${ysql_user} ;"
)
"${ysql_cmd[@]}"
;;
"ycql")
read -r -a ycql_cmd <<< "${prefix_ycql_cmd[@]}"
ycql_cmd+=(
-u "$DEFAULT_YCQL_USER"
-p "$DEFAULT_YCQL_PASSWORD"
-e "CREATE ROLE IF NOT EXISTS ${ycql_user} WITH PASSWORD = '${ycql_password}' AND LOGIN = true AND SUPERUSER = true ;"
)
"${ycql_cmd[@]}"
;;
*) exit 1
esac
}
# Change the password of the default admin role (${ysql_user} / ${ycql_user})
# using the default admin credentials. Branch selected by the global `api`.
# Only invoked from the main section when the user IS the default user but a
# non-default password was requested.
update_password() {
declare -a ysql_cmd
declare -a ycql_cmd
case "$api" in
"ysql")
export PGPASSWORD="$DEFAULT_YSQL_PASSWORD"
read -r -a ysql_cmd <<< "${prefix_ysql_cmd[@]}"
ysql_cmd+=(
-U "$DEFAULT_YSQL_USER"
-c "ALTER ROLE ${ysql_user} WITH PASSWORD '${ysql_password}' ;"
)
"${ysql_cmd[@]}"
;;
"ycql")
read -r -a ycql_cmd <<< "${prefix_ycql_cmd[@]}"
ycql_cmd+=(
-u "$DEFAULT_YCQL_USER"
-p "$DEFAULT_YCQL_PASSWORD"
-e "ALTER ROLE ${ycql_user} WITH PASSWORD = '${ycql_password}' ;"
)
"${ycql_cmd[@]}"
;;
*) exit 1
esac
}
# Create the data "container": a YSQL database (${ysql_db}) or a YCQL
# keyspace (${ycql_keyspace}), depending on the global `api`. Uses the
# default admin credentials. YCQL uses IF NOT EXISTS; the YSQL CREATE
# DATABASE does not, so it is only called for non-default DB names.
create_container() {
declare -a ysql_cmd
declare -a ycql_cmd
case "$api" in
"ysql")
export PGPASSWORD="$DEFAULT_YSQL_PASSWORD"
read -r -a ysql_cmd <<< "${prefix_ysql_cmd[@]}"
ysql_cmd+=(
-U "$DEFAULT_YSQL_USER"
-c "CREATE DATABASE ${ysql_db} ;"
)
"${ysql_cmd[@]}"
;;
"ycql")
read -r -a ycql_cmd <<< "${prefix_ycql_cmd[@]}"
ycql_cmd+=(
-u "$DEFAULT_YCQL_USER"
-p "$DEFAULT_YCQL_PASSWORD"
-e "CREATE KEYSPACE IF NOT EXISTS ${ycql_keyspace} ;"
)
"${ycql_cmd[@]}"
;;
*) exit 1
esac
}
# -----------------------------------------
# Main
# -----------------------------------------
# Propagate the exit status through the logging trap on every exit path.
trap cleanup EXIT
echo "Waiting for YugabyteDB to start."
# Bound the connect poll to 3 minutes; waitUntilHealthy is exported above so
# it is visible inside `bash -c`.
if ! timeout 3m bash -c "waitUntilHealthy ${DEFAULT_YSQL_USER} ${DEFAULT_YSQL_PASSWORD} ${YSQL_PORT} {{ .Values.tls.enabled }}"; then
echo "Timeout while waiting for database"
exit 1
fi
# YSQL Credentials
get_ysql_credentials
## Create YSQL DB (only when it differs from the default database)
if [[ -n $ysql_db ]] && [[ "$ysql_db" != "$DEFAULT_YSQL_DB" ]]; then
create_container
fi
## Update YSQL Password (only meaningful for the default user)
if [[ -n $ysql_password ]] && [[ "$ysql_password" != "$DEFAULT_YSQL_PASSWORD" ]] && [[ "$ysql_user" == "$DEFAULT_YSQL_USER" ]]; then
update_password
fi
## Create YSQL User
if [[ -n $ysql_user ]] && [[ "$ysql_user" != "$DEFAULT_YSQL_USER" ]]; then
create_user
fi
# YCQL Credentials
get_ycql_credentials
## Create YCQL Keyspace
# Fix: the original tested `-n $ycql_keyspace` twice (copy/paste of the YSQL
# DB check); a single non-empty test is equivalent — there is no default
# keyspace constant to compare against, and the keyspace CREATE is idempotent
# (IF NOT EXISTS).
if [[ -n "$ycql_keyspace" ]]; then
create_container
fi
## Update YCQL Password (only meaningful for the default user)
if [[ -n $ycql_password ]] && [[ "$ycql_password" != "$DEFAULT_YCQL_PASSWORD" ]] && [[ "$ycql_user" == "$DEFAULT_YCQL_USER" ]]; then
update_password
fi
## Create YCQL User
if [[ -n $ycql_user ]] && [[ "$ycql_user" != "$DEFAULT_YCQL_USER" ]]; then
create_user
fi
{{- end }}

View File

@ -0,0 +1,115 @@
{{- $sm := .Values.serviceMonitor }}
{{ if and $sm.enabled (or $sm.tserver.enabled $sm.ycql.enabled $sm.ysql.enabled $sm.yedis.enabled) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "yugabyte.fullname" . }}-yb-tserver
labels:
{{- if .Values.oldNamingStyle }}
app: "yb-tserver"
{{- else }}
app.kubernetes.io/name: "yb-tserver"
{{- end }}
release: {{ .Release.Name | quote }}
chart: "{{ .Chart.Name }}"
component: "{{ .Values.Component }}"
{{- with .Values.serviceMonitor.extraLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
jobLabel: "release"
selector:
matchLabels:
{{- if .Values.oldNamingStyle }}
app: "yb-tserver"
{{- else }}
app.kubernetes.io/name: "yb-tserver"
{{- end }}
release: {{ .Release.Name | quote }}
service-type: "headless"
endpoints:
{{- with .Values.serviceMonitor.tserver }}
{{- if .enabled }}
- port: {{ .port }}
path: {{ .path }}
{{- if .interval }}
interval: {{ .interval }}
{{- else }}
interval: {{ $.Values.serviceMonitor.interval }}
{{- end }}
relabelings:
- targetLabel: "group"
replacement: "yb-tserver"
- targetLabel: "export_type"
replacement: "tserver_export"
- targetLabel: "node_prefix"
replacement: {{ $.Release.Name | quote }}
metricRelabelings:
{{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }}
{{- end }}
{{- end }}
{{- with .Values.serviceMonitor.ycql }}
{{- if .enabled }}
- port: {{ .port }}
path: {{ .path }}
{{- if .interval }}
interval: {{ .interval }}
{{- else }}
interval: {{ $.Values.serviceMonitor.interval }}
{{- end }}
relabelings:
- targetLabel: "group"
replacement: "ycql"
- targetLabel: "export_type"
replacement: "cql_export"
- targetLabel: "node_prefix"
replacement: {{ $.Release.Name | quote }}
metricRelabelings:
{{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }}
{{- end }}
{{- end }}
{{- with .Values.serviceMonitor.ysql }}
{{- if .enabled }}
- port: {{ .port }}
path: {{ .path }}
{{- if .interval }}
interval: {{ .interval }}
{{- else }}
interval: {{ $.Values.serviceMonitor.interval }}
{{- end }}
relabelings:
- targetLabel: "group"
replacement: "ysql"
- targetLabel: "export_type"
replacement: "ysql_export"
- targetLabel: "node_prefix"
replacement: {{ $.Release.Name | quote }}
metricRelabelings:
{{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }}
{{- end }}
{{- end }}
{{- with .Values.serviceMonitor.yedis }}
{{- if .enabled }}
- port: {{ .port }}
path: {{ .path }}
{{- if .interval }}
interval: {{ .interval }}
{{- else }}
interval: {{ $.Values.serviceMonitor.interval }}
{{- end }}
relabelings:
- targetLabel: "group"
replacement: "yedis"
- targetLabel: "export_type"
replacement: "redis_export"
- targetLabel: "node_prefix"
replacement: {{ $.Release.Name | quote }}
metricRelabelings:
{{- toYaml $.Values.serviceMonitor.commonMetricRelabelings | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,540 @@
# Default values for yugabyte.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
Component: "yugabytedb"
fullnameOverride: ""
nameOverride: ""
Image:
repository: "yugabytedb/yugabyte"
tag: 2.18.9.0-b17
pullPolicy: IfNotPresent
pullSecretName: ""
storage:
ephemeral: false # will not allocate PVs when true
master:
count: 2
size: 10Gi
storageClass: ""
tserver:
count: 2
size: 10Gi
storageClass: ""
resource:
master:
requests:
cpu: "2"
memory: 2Gi
limits:
cpu: "2"
memory: 2Gi
tserver:
requests:
cpu: "2"
memory: 4Gi
limits:
cpu: "2"
memory: 4Gi
replicas:
master: 3
tserver: 3
## Used to set replication factor when isMultiAz is set to true
totalMasters: 3
partition:
master: 0
tserver: 0
# Used in Multi-AZ setup
masterAddresses: ""
isMultiAz: false
AZ: ""
# Disable the YSQL
disableYsql: false
tls:
# Set to true to enable the TLS.
enabled: false
nodeToNode: true
clientToServer: true
# Set to false to disallow any service with unencrypted communication from joining this cluster
insecure: false
# Set enabled to true to use cert-manager instead of providing your own rootCA
certManager:
enabled: false
# Will create own ca certificate and issuer when set to true
bootstrapSelfsigned: true
# Use ClusterIssuer when set to true, otherwise use Issuer
useClusterIssuer: false
# Name of ClusterIssuer to use when useClusterIssuer is true
clusterIssuer: cluster-ca
# Name of Issuer to use when useClusterIssuer is false
issuer: yugabyte-ca
certificates:
# The lifetime before cert-manager will issue a new certificate.
# The re-issued certificates will not be automatically reloaded by the service.
# It is necessary to provide some external means of restarting the pods.
duration: 2160h # 90d
renewBefore: 360h # 15d
algorithm: RSA # ECDSA or RSA
# Can be 2048, 4096 or 8192 for RSA
# Or 256, 384 or 521 for ECDSA
keySize: 2048
## When certManager.enabled=false, rootCA.cert and rootCA.key are used to generate TLS certs.
## When certManager.enabled=true and bootstrapSelfsigned=true, rootCA is ignored.
## When certManager.enabled=true and bootstrapSelfsigned=false, only rootCA.cert is used
## to verify TLS certs generated and signed by the external provider.
rootCA:
cert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFXTVJRd0VnWURWUVFERXd0WmRXZGgKWW5sMFpTQkVRakFlRncweE9UQXlNRGd3TURRd01qSmFGdzB5T1RBeU1EVXdNRFF3TWpKYU1CWXhGREFTQmdOVgpCQU1UQzFsMVoyRmllWFJsSUVSQ01JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCnVOMWF1aWc4b2pVMHM0OXF3QXhrT2FCaHkwcTlyaVg2akVyZWJyTHJOWDJOeHdWQmNVcWJkUlhVc3VZNS96RUQKUC9CZTNkcTFuMm9EQ2ZGVEwweGkyNFdNZExRcnJBMndCdzFtNHM1WmQzcEJ1U04yWHJkVVhkeUx6dUxlczJNbgovckJxcWRscXp6LzAyTk9TOE9SVFZCUVRTQTBSOFNMQ1RjSGxMQmRkMmdxZ1ZmemVXRlVObXhWQ2EwcHA5UENuCmpUamJJRzhJWkh5dnBkTyt3aURQM1Y1a1ZEaTkvbEtUaGUzcTFOeDg5VUNFcnRJa1pjSkYvWEs3aE90MU1sOXMKWDYzb2lVMTE1Q2svbGFGRjR6dWgrZk9VenpOVXRXeTc2RE92cm5pVGlaU0tQZDBBODNNa2l2N2VHaDVkV3owWgpsKzJ2a3dkZHJaRzVlaHhvbGhGS3pRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQXFRd0hRWURWUjBsCkJCWXdGQVlJS3dZQkJRVUhBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFEQjVRbmlYd1ptdk52eG5VbS9sTTVFbms3VmhTUzRUZldIMHY4Q0srZWZMSVBTbwpVTkdLNXU5UzNEUWlvaU9SN1Vmc2YrRnk1QXljMmNUY1M2UXBxTCt0V1QrU1VITXNJNk9oQ05pQ1gvQjNKWERPCkd2R0RIQzBVOHo3aWJTcW5zQ2Rid05kajAyM0lwMHVqNE9DVHJ3azZjd0RBeXlwVWkwN2tkd28xYWJIWExqTnAKamVQMkwrY0hkc2dKM1N4WWpkK1kvei9IdmFrZG1RZDJTL1l2V0R3aU1SRDkrYmZXWkJVRHo3Y0QyQkxEVmU0aAp1bkFaK3NyelR2Sjd5dkVodzlHSDFyajd4Qm9VNjB5SUUrYSszK2xWSEs4WnBSV0NXMnh2eWNrYXJSKytPS2NKClFsL04wWExqNWJRUDVoUzdhOTdhQktTamNqY3E5VzNGcnhJa2tKST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
key: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdU4xYXVpZzhvalUwczQ5cXdBeGtPYUJoeTBxOXJpWDZqRXJlYnJMck5YMk54d1ZCCmNVcWJkUlhVc3VZNS96RURQL0JlM2RxMW4yb0RDZkZUTDB4aTI0V01kTFFyckEyd0J3MW00czVaZDNwQnVTTjIKWHJkVVhkeUx6dUxlczJNbi9yQnFxZGxxenovMDJOT1M4T1JUVkJRVFNBMFI4U0xDVGNIbExCZGQyZ3FnVmZ6ZQpXRlVObXhWQ2EwcHA5UENualRqYklHOElaSHl2cGRPK3dpRFAzVjVrVkRpOS9sS1RoZTNxMU54ODlVQ0VydElrClpjSkYvWEs3aE90MU1sOXNYNjNvaVUxMTVDay9sYUZGNHp1aCtmT1V6ek5VdFd5NzZET3ZybmlUaVpTS1BkMEEKODNNa2l2N2VHaDVkV3owWmwrMnZrd2RkclpHNWVoeG9saEZLelFJREFRQUJBb0lCQUJsdW1tU3gxR1djWER1Mwpwei8wZEhWWkV4c2NsU3U0SGRmZkZPcTF3cFlCUjlmeGFTZGsxQzR2YXF1UjhMaWl6WWVtVWViRGgraitkSnlSCmpwZ2JNaDV4S1BtRkw5empwU3ZUTkN4UHB3OUF5bm5sM3dyNHZhcU1CTS9aZGpuSGttRC9kQzBadEEvL0JIZ3YKNHk4d3VpWCsvUWdVaER0Z1JNcmR1ZUZ1OVlKaFo5UE9jYXkzSkkzMFhEYjdJSS9vNFNhYnhTcFI3bTg5WjY0NwpUb3hsOEhTSzl0SUQxbkl1bHVpTmx1dHI1RzdDdE93WTBSc2N5dmZ2elg4a1d2akpLZVJVbmhMSCtXVFZOaExICjdZc0tMNmlLa1NkckMzeWVPWnV4R0pEbVdrZVgxTzNPRUVGYkc4TjVEaGNqL0lXbDh1dGt3LzYwTEthNHBCS2cKTXhtNEx3RUNnWUVBNnlPRkhNY2pncHYxLzlHZC8yb3c2YmZKcTFjM1dqQkV2cnM2ZXNyMzgrU3UvdVFneXJNcAo5V01oZElpb2dYZjVlNjV5ZlIzYVBXcjJJdWMxZ0RUNlYycDZFR2h0NysyQkF1YkIzczloZisycVNRY1lkS3pmCnJOTDdKalE4ZEVGZWdYd041cHhKOTRTTVFZNEI4Qm9hOHNJWTd3TzU4dHpVMjZoclVnanFXQ1VDZ1lFQXlVUUIKNzViWlh6MGJ5cEc5NjNwYVp0bGlJY0cvUk1XMnVPOE9rVFNYSGdDSjBob25uRm5IMGZOc1pGTHdFWEtnTTRORworU3ZNbWtUekE5eVVSMHpIMFJ4UW44L1YzVWZLT2k5RktFeWx6NzNiRkV6ZW1QSEppQm12NWQ4ZTlOenZmU0E0CkdpRTYrYnFyV3VVWWRoRWlYTnY1SFNPZ3I4bUx1TzJDbGlmNTg0a0NnWUFlZzlDTmlJWmlOODAzOHNNWFYzZWIKalI5ZDNnYXY3SjJ2UnVyeTdvNDVGNDlpUXNiQ3AzZWxnY1RnczY5eWhkaFpwYXp6OGNEVndhREpyTW16cHF4cQpWY1liaFFIblppSWM5MGRubS9BaVF2eWJWNUZqNnQ5b05VVWtreGpaV1haalJXOGtZMW55QmtDUmJWVnhER0k4CjZOV0ZoeTFGaUVVVGNJcms3WVZFQlFLQmdRREpHTVIrYWRFamtlRlUwNjVadkZUYmN0VFVPY3dzb1Foalc2akkKZVMyTThxakNYeE80NnhQMnVTeFNTWFJKV3FpckQ3NDRkUVRvRjRCaEdXS21veGI3M3pqSGxWaHcwcXhDMnJ4VQorZENxODE0VXVJR3BlOTBMdWU3QTFlRU9kRHB1WVdUczVzc1FmdTE3MG5CUWQrcEhzaHNFZkhhdmJjZkhyTGpQCjQzMmhVUUtCZ1FDZ3hMZG5Pd2JMaHZLVkhhdTdPVXQxbG
pUT240SnB5bHpnb3hFRXpzaDhDK0ZKUUQ1bkFxZXEKZUpWSkNCd2VkallBSDR6MUV3cHJjWnJIN3IyUTBqT2ZFallwU1dkZGxXaWh4OTNYODZ0aG83UzJuUlYrN1hNcQpPVW9ZcVZ1WGlGMWdMM1NGeHZqMHhxV3l0d0NPTW5DZGFCb0M0Tkw3enJtL0lZOEUwSkw2MkE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo="
## When tls.certManager.enabled=false
## nodeCert and clientCert will be used only when rootCA.key is empty.
## Will be ignored and genSignedCert will be used to generate
## node and client certs if rootCA.key is provided.
## cert and key are base64 encoded content of certificate and key.
nodeCert:
cert: ""
key: ""
clientCert:
cert: ""
key: ""
gflags:
master:
default_memory_limit_to_ram_ratio: 0.85
tserver: {}
# use_cassandra_authentication: false
PodManagementPolicy: Parallel
enableLoadBalancer: true
ybc:
enabled: false
## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container
## Use the above link to learn more about Kubernetes resources configuration.
# resources:
# requests:
# cpu: "1"
# memory: 1Gi
# limits:
# cpu: "1"
# memory: 1Gi
ybCleanup: {}
## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container
## Use the above link to learn more about Kubernetes resources configuration.
# resources:
# requests:
# cpu: "1"
# memory: 1Gi
# limits:
# cpu: "1"
# memory: 1Gi
domainName: "cluster.local"
serviceEndpoints:
- name: "yb-master-ui"
type: LoadBalancer
annotations: {}
clusterIP: ""
## Sets the Service's externalTrafficPolicy
externalTrafficPolicy: ""
app: "yb-master"
loadBalancerIP: ""
ports:
http-ui: "7000"
- name: "yb-tserver-service"
type: LoadBalancer
annotations: {}
clusterIP: ""
## Sets the Service's externalTrafficPolicy
externalTrafficPolicy: ""
app: "yb-tserver"
loadBalancerIP: ""
ports:
tcp-yql-port: "9042"
tcp-yedis-port: "6379"
tcp-ysql-port: "5433"
Services:
- name: "yb-masters"
label: "yb-master"
skipHealthChecks: false
memory_limit_to_ram_ratio: 0.85
ports:
http-ui: "7000"
tcp-rpc-port: "7100"
- name: "yb-tservers"
label: "yb-tserver"
skipHealthChecks: false
ports:
http-ui: "9000"
tcp-rpc-port: "9100"
tcp-yql-port: "9042"
tcp-yedis-port: "6379"
tcp-ysql-port: "5433"
http-ycql-met: "12000"
http-yedis-met: "11000"
http-ysql-met: "13000"
grpc-ybc-port: "18018"
## Should be set to true only if Istio is being used. This also adds
## the Istio sidecar injection labels to the pods.
## TODO: remove this once
## https://github.com/yugabyte/yugabyte-db/issues/5641 is fixed.
##
istioCompatibility:
enabled: false
## Settings required when using multicluster environment.
multicluster:
## Creates a ClusterIP service for each yb-master and yb-tserver
## pod.
createServicePerPod: false
## creates a ClusterIP service whose name does not have release name
## in it. A common service across different clusters for automatic
## failover. Useful when using new naming style.
createCommonTserverService: false
## Enable it to deploy YugabyteDB in a multi-cluster services enabled
## Kubernetes cluster (KEP-1645). This will create ServiceExport.
## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#registering_a_service_for_export
## You can use this gist for the reference to deploy the YugabyteDB in a multi-cluster scenario.
## Gist - https://gist.github.com/baba230896/78cc9bb6f4ba0b3d0e611cd49ed201bf
createServiceExports: false
## Mandatory variable when createServiceExports is set to true.
## Use: In case of GKE, you need to pass GKE Hub Membership Name.
## GKE Ref - https://cloud.google.com/kubernetes-engine/docs/how-to/multi-cluster-services#enabling
kubernetesClusterId: ""
## mcsApiVersion is used for the MCS resources created by the
## chart. Set to net.gke.io/v1 when using GKE MCS.
mcsApiVersion: "multicluster.x-k8s.io/v1alpha1"
serviceMonitor:
## If true, two ServiceMonitor CRs are created. One for yb-master
## and one for yb-tserver
## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor
##
enabled: false
## interval is the default scrape_interval for all the endpoints
interval: 30s
## extraLabels can be used to add labels to the ServiceMonitors
## being created
extraLabels: {}
# release: prom
## Configurations of ServiceMonitor for yb-master
master:
enabled: true
port: "http-ui"
interval: ""
path: "/prometheus-metrics"
## Configurations of ServiceMonitor for yb-tserver
tserver:
enabled: true
port: "http-ui"
interval: ""
path: "/prometheus-metrics"
ycql:
enabled: true
port: "http-ycql-met"
interval: ""
path: "/prometheus-metrics"
ysql:
enabled: true
port: "http-ysql-met"
interval: ""
path: "/prometheus-metrics"
yedis:
enabled: true
port: "http-yedis-met"
interval: ""
path: "/prometheus-metrics"
commonMetricRelabelings:
# https://git.io/JJW5p
# Save the name of the metric so we can group_by since we cannot by __name__ directly...
- sourceLabels: ["__name__"]
regex: "(.*)"
targetLabel: "saved_name"
replacement: "$1"
# The following basically retrofit the handler_latency_* metrics to label format.
- sourceLabels: ["__name__"]
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)"
targetLabel: "server_type"
replacement: "$1"
- sourceLabels: ["__name__"]
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)"
targetLabel: "service_type"
replacement: "$2"
- sourceLabels: ["__name__"]
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?"
targetLabel: "service_method"
replacement: "$3"
- sourceLabels: ["__name__"]
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?"
targetLabel: "__name__"
replacement: "rpc_latency$4"
resources: {}
nodeSelector: {}
affinity: {}
statefulSetAnnotations: {}
networkAnnotation: {}
commonLabels: {}
## @param dnsPolicy DNS Policy for pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsPolicy: ClusterFirst
dnsPolicy: ""
## @param dnsConfig DNS Configuration pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsConfig:
## options:
## - name: ndots
## value: "4"
dnsConfig: {}
master:
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core
## This might override the default affinity from service.yaml
# To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernetes
# has. Each new node selector term is ORed together, and each match expression or match field in
# a single selector is ANDed together.
# This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value
# 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity
# terms.
#
# Pod anti affinity is a simpler merge. Each term is applied separately, and the weight is tracked.
# The pod that achieves the highest weight is selected.
## Example.
# affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - "yb-master"
# topologyKey: kubernetes.io/hostname
#
# For further examples, see examples/yugabyte/affinity_overrides.yaml
affinity: {}
## Extra environment variables passed to the Master pods.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core
## Example:
# extraEnv:
# - name: NODE_IP
# valueFrom:
# fieldRef:
# fieldPath: status.hostIP
extraEnv: []
# secretEnv variables are used to expose secrets data as env variables in the master pod.
# TODO Add namespace also to support copying secrets from other namespace.
# secretEnv:
# - name: MYSQL_LDAP_PASSWORD
# valueFrom:
# secretKeyRef:
# name: secretName
# key: password
secretEnv: []
## Annotations to be added to the Master pods.
podAnnotations: {}
## Labels to be added to the Master pods.
podLabels: {}
## Tolerations to be added to the Master pods.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#toleration-v1-core
## Example:
# tolerations:
# - key: dedicated
# operator: Equal
# value: experimental
# effect: NoSchedule
tolerations: []
## Extra volumes
## extraVolumesMounts are mandatory for each extraVolumes.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core
## Example:
# extraVolumes:
# - name: custom-nfs-vol
# persistentVolumeClaim:
# claimName: some-nfs-claim
extraVolumes: []
## Extra volume mounts
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core
## Example:
# extraVolumeMounts:
# - name: custom-nfs-vol
# mountPath: /home/yugabyte/nfs-backup
extraVolumeMounts: []
## Set service account for master DB pods. The service account
## should exist in the namespace where the master DB pods are brought up.
serviceAccount: ""
tserver:
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core
## This might override the default affinity from service.yaml
# To successfully merge, we need to follow rules for merging nodeSelectorTerms that kubernetes
# has. Each new node selector term is ORed together, and each match expression or match field in
# a single selector is ANDed together.
# This means, if a pod needs to be scheduled on a label 'custom_label_1' with a value
# 'custom_value_1', we need to add this 'subterm' to each of our pre-defined node affinity
# terms.
#
# Pod anti affinity is a simpler merge. Each term is applied separately, and the weight is tracked.
# The pod that achieves the highest weight is selected.
## Example.
# affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - "yb-tserver"
# topologyKey: kubernetes.io/hostname
# For further examples, see examples/yugabyte/affinity_overrides.yaml
affinity: {}
## Extra environment variables passed to the TServer pods.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core
## Example:
# extraEnv:
# - name: NODE_IP
# valueFrom:
# fieldRef:
# fieldPath: status.hostIP
extraEnv: []
## secretEnv variables are used to expose secrets data as env variables in the tserver pods.
## If namespace field is not specified we assume that user already
## created the secret in the same namespace as DB pods.
## Example
# secretEnv:
# - name: MYSQL_LDAP_PASSWORD
# valueFrom:
# secretKeyRef:
# name: secretName
# namespace: my-other-namespace-with-ldap-secret
# key: password
secretEnv: []
## Annotations to be added to the TServer pods.
podAnnotations: {}
## Labels to be added to the TServer pods.
podLabels: {}
## Tolerations to be added to the TServer pods.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#toleration-v1-core
## Example:
# tolerations:
# - key: dedicated
# operator: Equal
# value: experimental
# effect: NoSchedule
tolerations: []
## Sets the --server_broadcast_addresses flag on the TServer, no
## preflight checks are done for this address. You might need to add
## `use_private_ip: cloud` to the gflags.master and gflags.tserver.
serverBroadcastAddress: ""
## Extra volumes
## extraVolumesMounts are mandatory for each extraVolumes.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core
## Example:
# extraVolumes:
# - name: custom-nfs-vol
# persistentVolumeClaim:
# claimName: some-nfs-claim
extraVolumes: []
## Extra volume mounts
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core
## Example:
# extraVolumeMounts:
# - name: custom-nfs-vol
# mountPath: /home/yugabyte/nfs-backup
extraVolumeMounts: []
## Set service account for tserver DB pods. The service account
## should exist in the namespace where the tserver DB pods are brought up.
serviceAccount: ""
helm2Legacy: false
ip_version_support: "v4_only" # v4_only, v6_only are the only supported values at the moment
# For more https://docs.yugabyte.com/latest/reference/configuration/yugabyted/#environment-variables
authCredentials:
ysql:
user: ""
password: ""
database: ""
ycql:
user: ""
password: ""
keyspace: ""
oldNamingStyle: true
preflight:
# Set to true to skip disk IO check, DNS address resolution, and
# port bind checks
skipAll: false
# Set to true to skip port bind checks
skipBind: false
## Set to true to skip ulimit verification
## SkipAll has higher priority
skipUlimit: false
## Pod securityContext
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
## The following configuration runs YB-Master and YB-TServer as a non-root user
podSecurityContext:
enabled: false
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## Added to handle old universe which has volume annotations
## K8s universe <= 2.5 to >= 2.6
legacyVolumeClaimAnnotations: false

View File

@ -0,0 +1,19 @@
# Create YugaByte specific service account
apiVersion: v1
kind: ServiceAccount
metadata:
name: yugabyte-helm
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: yugabyte-helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: yugabyte-helm
namespace: kube-system

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,22 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: YugabyteDB Anywhere
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: yugaware
charts.openshift.io/name: yugaware
apiVersion: v2
appVersion: 2.18.9.0-b17
description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring
for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB cluster
with multiple pods provided by Kubernetes or OpenShift and logically grouped together
to form one logical distributed database.
home: https://www.yugabyte.com
icon: file://assets/icons/yugaware.jpg
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
name: Sanketh Indarapu
- email: gjalla@yugabyte.com
name: Govardhan Reddy Jalla
name: yugaware
version: 2.18.9

View File

@ -0,0 +1,7 @@
YugabyteDB Anywhere gives you the simplicity and support to deliver a private database-as-a-service (DBaaS) at scale. Use YugabyteDB Anywhere to deploy YugabyteDB across any cloud anywhere in the world with a few clicks, simplify day 2 operations through automation, and get the services needed to realize business outcomes with the database.
YugabyteDB Anywhere can be deployed using this Helm chart. Detailed documentation is available at:
- [Install YugabyteDB Anywhere software - Kubernetes](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes/)
- [Install YugabyteDB Anywhere software - OpenShift (Helm based)](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/openshift/#helm-based-installation)
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/yugabyte)](https://artifacthub.io/packages/search?repo=yugabyte)

View File

@ -0,0 +1,5 @@
YugabyteDB Anywhere gives you the simplicity and support to deliver a private database-as-a-service (DBaaS) at scale. Use YugabyteDB Anywhere to deploy YugabyteDB across any cloud anywhere in the world with a few clicks, simplify day 2 operations through automation, and get the services needed to realize business outcomes with the database.
YugabyteDB Anywhere can be deployed using this helm chart. Detailed documentation is available at <https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes/>
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/yugabyte)](https://artifacthub.io/packages/search?repo=yugabyte)

View File

@ -0,0 +1,24 @@
# OCP compatible values for yugaware
image:
repository: quay.io/yugabyte/yugaware-ubi
postgres:
registry: registry.redhat.io
tag: 1-88.1661531722
name: rhscl/postgresql-13-rhel7
prometheus:
registry: registry.redhat.io
tag: v4.11.0
name: openshift4/ose-prometheus
rbac:
create: false
ocpCompatibility:
enabled: true
securityContext:
enabled: false

View File

@ -0,0 +1,267 @@
---
questions:
## Default images for yugaware pod
- variable: questions.defaultYBPlatform
default: true
label: Default Yugabyte Platform configurations
type: boolean
show_subquestion_if: false
group: "Yugabyte Platform"
subquestions:
- variable: image.repository
default: "quay.io/yugabyte/yugaware"
required: false
type: string
label: Yugabyte Platform image repository
description: "Yugabyte Platform image repository"
- variable: image.tag
default: "2.5.1.0-b153"
required: false
type: string
label: Yugabyte Platform image tag
description: "Yugabyte Platform image tag"
- variable: image.pullPolicy
default: "IfNotPresent"
required: false
label: Yugabyte Platform image pull policy
description: "Yugabyte Platform image pull policy"
type: enum
options:
- "Always"
- "IfNotPresent"
- variable: image.pullSecret
default: "yugabyte-k8s-pull-secret"
required: false
type: secret
label: Yugabyte Platform image pull secret
description: "Yugabyte Platform image pull secret"
- variable: yugaware.storage
default: "100Gi"
required: false
type: string
label: Storage
description: "Storage"
- variable: yugaware.storageClass
default: ""
required: false
type: storageclass
label: Storage Class
description: "Storage Class"
- variable: yugaware.resources.requests.cpu
default: "2"
required: false
type: string
label: CPU request for Yugabyte Platform
description: "CPU request for Yugabyte Platform"
- variable: yugaware.resources.requests.memory
default: "4Gi"
required: false
type: string
label: Memory request for Yugabyte Platform
description: "Memory request for Yugabyte Platform"
- variable: yugaware.service.enabled
default: true
description: "Service used to access the Yugabyte Platform"
label: Create service for Yugabyte Platform
type: boolean
show_subquestion_if: false
group: "Platform Service"
subquestions:
- variable: yugaware.service.ip
default: ""
required: false
type: string
label: Yugabyte Platform Service IP
description: "Yugabyte Platform Service IP"
- variable: yugaware.service.type
default: "LoadBalancer"
required: false
type: string
label: Yugabyte Platform Service type
description: "Yugabyte Platform Service type"
- variable: tls.enabled
default: false
required: false
type: boolean
label: Enable TLS on Yugabyte Platform
description: "Enable TLS on Yugabyte Platform"
- variable: tls.hostname
default: "localhost"
required: false
type: string
label: Yugabyte Platform TLS hostname
description: "Yugabyte Platform TLS hostname"
- variable: tls.certificate
default: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZDVENDQXZHZ0F3SUJBZ0lVTlhvN2N6T2dyUWQrU09wOWdNdE00b1Vva3hFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0piRzlqWVd4b2IzTjBNQjRYRFRJeE1EUXdOakExTXpnMU4xb1hEVE14TURRdwpOREExTXpnMU4xb3dGREVTTUJBR0ExVUVBd3dKYkc5allXeG9iM04wTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBZzhBTUlJQ0NnS0NBZ0VBMUxsSTFBLzRPOVIzSkNlN1N2MUxYVXhDSmxoTWpIWUoxV1FNVmcvai82RHkKazRTTmY0MkFLQjI0dFJFK2lEWTBNaTJrRWhJcVZ4TFdPN0hkWHVSN0tYNGxSZWFVVkRFTUtYUWNQUC9QWDZkbwpwZVZTUFpSVjVHNHNxTElXUFFkTVdIam9IQWx1aml5dGJsSVJUUWdLU3QrMmpuREFDN0dxRURMREdhNXRUWEM2CktRWkNtOERlaklOUTMzaGU2TDN0Q2hBRnhJM1pwY21sR0twbzdKVXJSUG14Mk9zTHFRcTB5dEVVK0lGZGppWHEKaHJLeFR0NUhHM3M3ZUNWaTRXdlZPelVGUitJbWRlQzBRZTBXeG5iZlZUMnJkVitQL1FaVXhWSEVtWnBPc0k2LwpmczhlK1dsMlduWXY1TTg5MWkxZER3Zi9lMDdiN20xQVRKdDRtTGRldzBtd1V4UGFGT2pDMDh6cU94NmF0cGhLClU1eHNWQmhGNVhyME9DeTQyMzN0MU5URXdWUEFDOFcwQmhHdldTRXBQTXNTKzM1b2lueEFrcFQzL01ibFpjNisKcXhSYUh6MHJhSksvVGIzelVKVWxWZFkxbGl5MVYyVjNxWEU2NWlsOUFHZ2pIaHhBNFBwSktCbzZ0WVRUT3pnTworL25mc0toMk95aE8zUWxBZ0JFUHlYUm5wL0xGSTVuQ2gzdjNiOXlabFNrSk05NkVoWEJ1bHhWUWN3L2p3N2NxCkRLSlBEeHFUQy9rWUs1V0FVZGhkWG1KQkRNMFBLcngzUGVOYjRsYnQzSTFIZW1QRDBoZktiWFd6alhiVTJQdWQKdjZmT0dXTDRLSFpaem9KZ1ljMFovRXRUMEpCR09GM09mMW42N2c5dDRlUnAzbEVSL09NM0FPY1dRbWFvOHlVQwpBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUI4R0ExVWRJd1FZCk1CYUFGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dJQkFBRmxrWVJkdzA0Zm9vT29BelUyaU5ORGV1aiszemhIeFQ5eU9iSkdwREZIRitoZQpuY1ZRWGZpMitHNjBWY0xuZERsWFhmbDZLOSs4ME55aEg4QjR1UEJNTWhoWG01MjJmYnJac1dFcnR3WE1rM2prClZ5UVA3MGk2NHE1ZGVrZzhoYzI0SXhFUlVsam9XM2lDTTdrb0VxaG15VkpGeDNxMVdobFEwdzNkWVpMQVNRclYKU0RpL2JGWjlqOXVtWVdoc0Y4QjFPSThPVjNlL0YyakU1UCtoTlJJazAzbW9zWE1Rdy9iZ3ZzV0hvSkZ5blB4UApHNGUzUjBob2NnbzI0Q2xOQ21YMWFBUms5c1pyN2h0NlVsM1F1d0dMdzZkK2I5emxrUW56TzFXQzc5ekVNU1R0ClRRRzFNT2ZlL2dTVkR3dThTSnpBOHV1Z0pYTktWWkxCZlpaNW41Tk9sOHdpOVVLa1BVUW4wOHo3VWNYVDR5ZnQKZHdrbnZnWDRvMFloUnNQNHpPWDF6eWxObzhqRDhRNlV1Sk
dQSksrN1JnUm8zVERPV3k4MEZpUzBxRmxrSFdMKwptT0pUWGxzaEpwdHE5b1c1eGx6N1lxTnFwZFVnRmNyTjJLQWNmaGVlNnV3SUFnOFJteTQvRlhRZjhKdXluSG5oClFhVlFnTEpEeHByZTZVNk5EdWg1Y1VsMUZTcWNCUGFPY0x0Q0ViVWg5ckQxajBIdkRnTUUvTTU2TGp1UGdGZlEKMS9xeXlDUkFjc2NCSnVMYjRxcXRUb25tZVZ3T1BBbzBsNXBjcC9JcjRTcTdwM0NML0kwT0o1SEhjcmY3d3JWSgpQVWgzdU1LbWVHVDRyeDdrWlQzQzBXenhUU0loc0lZOU12MVRtelF4MEprQm93c2NYaUYrcXkvUkl5UVgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
required: false
type: string
label: Yugabyte Platform TLS Certificate
description: "Yugabyte Platform TLS Certificate (base64 encoded)"
- variable: tls.key
default: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFV1VWpVRC9nNzFIY2sKSjd0Sy9VdGRURUltV0V5TWRnblZaQXhXRCtQL29QS1RoSTEvallBb0hiaTFFVDZJTmpReUxhUVNFaXBYRXRZNwpzZDFlNUhzcGZpVkY1cFJVTVF3cGRCdzgvODlmcDJpbDVWSTlsRlhrYml5b3NoWTlCMHhZZU9nY0NXNk9MSzF1ClVoRk5DQXBLMzdhT2NNQUxzYW9RTXNNWnJtMU5jTG9wQmtLYndONk1nMURmZUY3b3ZlMEtFQVhFamRtbHlhVVkKcW1qc2xTdEUrYkhZNnd1cENyVEswUlQ0Z1YyT0plcUdzckZPM2tjYmV6dDRKV0xoYTlVN05RVkg0aVoxNExSQgo3UmJHZHQ5VlBhdDFYNC85QmxURlVjU1ptazZ3anI5K3p4NzVhWFphZGkva3p6M1dMVjBQQi85N1R0dnViVUJNCm0zaVl0MTdEU2JCVEU5b1U2TUxUek9vN0hwcTJtRXBUbkd4VUdFWGxldlE0TExqYmZlM1UxTVRCVThBTHhiUUcKRWE5WklTazh5eEw3Zm1pS2ZFQ1NsUGY4eHVWbHpyNnJGRm9mUFN0b2tyOU52Zk5RbFNWVjFqV1dMTFZYWlhlcApjVHJtS1gwQWFDTWVIRURnK2trb0dqcTFoTk03T0E3NytkK3dxSFk3S0U3ZENVQ0FFUS9KZEdlbjhzVWptY0tICmUvZHYzSm1WS1FrejNvU0ZjRzZYRlZCekQrUER0eW9Nb2s4UEdwTUwrUmdybFlCUjJGMWVZa0VNelE4cXZIYzkKNDF2aVZ1M2NqVWQ2WThQU0Y4cHRkYk9OZHRUWSs1Mi9wODRaWXZnb2Rsbk9nbUJoelJuOFMxUFFrRVk0WGM1LwpXZnJ1RDIzaDVHbmVVUkg4NHpjQTV4WkNacWp6SlFJREFRQUJBb0lDQUFmY2lScDlOSmxSY3MyOVFpaTFUN0cwCi9jVFpBb3MyV1lxdlZkMWdYUGEzaGY5NXFKa01LNjVQMnVHbUwzOXRNV1NoVnl6cnl2REkyMjM5VnNjSS9wdzcKOHppd0dzODV1TTlYWVN2SDhHd0NqZFdEc2hSZ2hRUWFKa0JkeElDZzRtdHFuSGxjeDk4dE80T1dPTmwxOEp0dgp4UmxpaFZacFRIV295cGtLWHpPN2RNWExXMjdTSStkaGV2Mm5QeXF1eWpIVEFjT1AwbmxVQ0d2dThFMjkvWWxoCkNQZVJTQzhKSEVGYWxNSFNWaGpJd2ZBVWJvVVJwZU1ZSE15RjVTK2JncGZiajhSbVVUR09DbHRkWGJnYjhJai8KN0hROEFlQkIrYVFKTDVEVnFRN1JWN1ppQlMwR2ZyODlHdXdEMUs4em9mcktPdURkdXpjR2hwZk9MeGpGdmhTOApSQ2Y1Z3BFMzg0aWlHc2tWZC9mZDJLK3NhSmk0L09HbHo0aHhhc1hDcTN1TXB5OTZPNFRrMXZzM3BXdWZNVmJXCnR2d1Mrcjhvbk9uOXZqa3lqOU11eUpId1BpSlNGMUt0ZzhPUU5WMlVST0xXcHlYMWk4Z2xoMXdSelRTQ2diQnMKZ3ZxWkFvaU1pWFh3SlVXN3Zpb0RLZjI0TnZvcjViaVNzeUh0MHVKUVZJaW1iK1prTFJwTWdwRlkyTlcrTnd6LwoxOW9DS2ZUVVpWNkJia09IK0NoOUowLy9hTTRGNnUvMTI4V0UxalJQU05mdWQ0b0dpdGVPNXRsRDNWSXRsb1hlCjNyWVMrcTNuYXU1RStWc2FRZGFVNzhrSnpXYmUrWURmQ1JwWGd6TkloSkMyQ1k5d0RSK3hIaVFwbzdLSHV6dngKUkpuRjhIcGwzdWhIdWxEam44dEpBb0lCQVFEeGxhVVIwN1
l6TGF2OVZtamZCenpZMjcwOU9tWnhpa3NtRnlhWApKTkJMQVB3SGdXOEVCUHdKOEprSDhXR1NTekp1OXZGd1JDVEVqZ1J5dWUvS05DWnNmUWF2UDg3dzhablJHaEhjCklHUUV1MFN3bmJzZXFJK1VWa0M5amZjaFE4dlowM0dQTGZ6bWpsSW9PNkNLTVM3TlV2Ynk5MksvOHRVVWRtWWgKMmJJa2N4V0J1RDJoenh3K1ZId3ArWktMQ0FPZi9sOG8vQ20xQ1dZSFNGdVYzTkl3T016Z2FKaExJODJNR08zQwpuODZTMXcweGc2MHB5dUV6L0hXZS9JMFZkRGNsWlgyNC9jalVBb01kQlkvSGY4Tkh2ZUNhZExQeXI3eGpRY2NLClAzN0RhdFRyK2RTZ2RoVkxzUDRRRzVVZEZxNUlMSHoxTXBkb2xXZ2pDSlZqcTZMekFvSUJBUURoYXNYdVRzMDIKNEkvYkRlSGRZSmw2Q1NzVUh2NmJXL3dpYlRhd2dpbDh5RUNWS2x6eFY4eENwWnoxWVhRQlY1YnVvQlArbjZCWApnVHgzTTJHc2R5UU1xdGRCWG9qdGp1czB6ekFNQVQzOWNmdWlHMGR0YXF3eWJMVlEwYThDZnFmMDVyUmZ0ekVmCmtTUDk2d01kVUEyTGdCbnU4akwzOU41UkxtK2RpZUdxeDAwYmJTa3l5UE9HNHIvcDl6KzN6TmVmeUhmbm94bTkKUnQza1RpeGhVNkd4UGhOSnZpWEUrWUpwT0dKVXMvK2dUWWpjUE1zRW9ONHIyR215cUs3S21NZExFa3Y1SHliWgprbmNsV2FMVFlhNEpjMjJUaWZJd01NTWMwaCtBMkJVckdjZFZ6MTA0UXluUFZQZDdXcEszenhqcjRPUHh1YnQ2CjZvTWk2REdRSVNlSEFvSUJBUURTK1YyVHFQRDMxczNaU3VvQXc2Qld2ZWVRbmZ5eThSUFpxdVFQb0oycXNxeG0KblpsbXlEZVhNcDloK1dHOVVhQTBtY0dWeWx6VnJqU2lRRkR4cEFOZVFQMWlkSFh6b3ZveVN2TUg2dDJONkVELwpnRy9XUVZ4S0xkMFI3UFhCL2lQN0VaV2RkWXJqaWF5ajZCYTJPR2RuOWlrbFcvZklLM2Y4QzczN2w5TGoxQUVYCkxOL2QvREh0R1BqcDYwTVgyYUxZeVZzdlBxL3BvdENRVVpkeDA4dFhRM05nRXRmVTN1cDFpNXV2bU1IZEtLTWoKOTV0MDRQRTA1aWVOOVgzOEcyYkJhTldYaFVJcUxCdDJiOUgxWmxVU3hQWnR6TGNObkgwSHJYejJMU2MxMzRrYwpueXdhQ2FWbFdhYzJSL0E3Mi8vTmxkUjJpWDBDWDEvM0lGcmVGUmtUQW9JQkFBbGt0S2pRbWRhZWx3QU8zUW1uCm05MnRBaUdOaFJpZVJheDlscGpXWTdveWNoYUZOR2hPTzFIUHF2SEN4TjNGYzZHd0JBVkpTNW81NVhZbUt2elAKM2kyMDlORmhpaDAwSm5NRjZ6K2swWnQ5STNwRzNyd2RoTjE1RURrMDg3RUw3QjNWZTFDOXhvdEZOaFcvdEZxRgpXbnNrdEcvem9kSVpYeVpNNUJQUmloamV3MFRRVUxZd0Q0M2daeFR0MjdiaUQxNDJNV0R5dUFEZU1pTHdhd01IClJDYXBxbzRaSVdQSzdmZEtoVFo0WmIrZFc0V3A5dC9UZ0U2ZGJ4SWwyMXJQOFFZYzFoT2tpNjduWHBXczNZOG4KYytRcTdqY0d1WlB1aEVMd01xWGcyMGozZ3duOVlTb1dDbWo4Wm0rNmY0Q3ZYWjkrdUtEN0YyZncyOVFaanU4dApvb01DZ2dFQkFPbVVHZ1VoT0tUVys1eEpkZlFKRUVXUncyVFF6Z2l6dSt3aVkzaDYrYXNTejRNY0srVGx6bWxVCmFHT013dFhTUzc0RXIxVmlCVXMrZnJKekFPR21IV0ExZWdtaGVlY1
BvaE9ybTh5WkVueVJOSkRhWC9UUXBSUnEKaVdoWENBbjJTWFQxcFlsYVBzMjdkbXpFWnQ3UlVUSkJZZ1hHZXQ4dXFjUXZaVDJZK3N6cHFNV3UzaEpWdmIxdgpZNGRJWE12RG1aV1BPVjFwbHJEaTVoc214VW05TDVtWk1IblllNzFOYkhsaEIxK0VUNXZmWFZjOERzU1RRZWRRCitDRHJKNGQ0em85dFNCa2pwYTM5M2RDRjhCSURESUQyWkVJNCtBVW52NWhTNm82NitOLzBONlp3cXkwc2pKY0cKQ21LeS9tNUpqVzFJWDMxSmZ1UU5Ldm9YNkRFN0Zkaz0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo="
required: false
type: string
label: Yugabyte Platform TLS key
description: "Yugabyte Platform TLS key (base64 encoded)"
## Postgres configurations
- variable: questions.defaultPostgres
default: true
description: "Use default postgres configurations"
label: Use default postgres configurations
type: boolean
show_subquestion_if: false
group: "Postgres"
subquestions:
- variable: image.postgres.registry
default: ""
required: false
type: string
label: Postgres image registry
description: "Postgres image registry"
- variable: image.postgres.tag
default: "11.5"
required: false
type: string
label: Postgres image tag
description: "Postgres image tag"
- variable: image.postgres.name
default: "postgres"
required: false
type: string
label: Postgres image name
description: "Postgres image name"
- variable: postgres.service.enabled
default: false
required: false
type: boolean
label: Expose internal Postgres as a Service
description: "Expose internal Postgres as a Service"
- variable: postgres.resources.requests.cpu
default: "0.5"
required: false
type: string
label: CPU request for Postgres
description: "CPU request for Postgres"
- variable: postgres.resources.requests.memory
default: "1Gi"
required: false
type: string
label: Memory request for Postgres
description: "Memory request for Postgres"
- variable: postgres.external.host
default: ""
required: false
type: string
label: External host for Postgres
description: "External host for Postgres"
- variable: postgres.external.port
default: 5432
required: false
type: int
label: External host port for Postgres
description: "External host port for Postgres"
- variable: postgres.external.pass
default: ""
required: false
type: string
label: External host password for Postgres
description: "External host password for Postgres"
- variable: postgres.external.dbname
default: "postgres"
required: false
type: string
label: External host Db name for Postgres
description: "External host Db name for Postgres"
- variable: postgres.external.user
default: "postgres"
required: false
type: string
label: External host Db user for Postgres
description: "External host Db user for Postgres"
- variable: postgres.external.jdbcParams
default: ""
required: false
type: string
label: JDBC connection parameters
description: "JDBC connection parameters including the leading `?`"
- variable: questions.defaultPrometheus
default: true
description: "Default Prometheus configurations"
label: Default Prometheus configurations
type: boolean
show_subquestion_if: false
group: "Prometheus"
subquestions:
- variable: image.prometheus.registry
default: ""
required: false
type: string
label: Prometheus image registry
description: "Prometheus image registry"
- variable: image.prometheus.tag
default: "v2.27.1"
required: false
type: string
label: Prometheus image tag
description: "Prometheus image tag"
- variable: image.prometheus.name
default: "prom/prometheus"
required: false
type: string
label: Prometheus image name
description: "Prometheus image name"
- variable: prometheus.resources.requests.cpu
default: "2"
required: false
type: string
label: CPU request for Prometheus
description: "CPU request for Prometheus"
- variable: prometheus.resources.requests.memory
default: "4Gi"
required: false
type: string
label: Memory request for Prometheus
description: "Memory request for Prometheus"
- variable: prometheus.retentionTime
default: 15d
required: false
type: string
label: Retention Time
description: "Retention Time"
- variable: securityContext.enabled
default: false
description: "Enable Security Context"
label: Enable Security Context
type: boolean
show_subquestion_if: true
group: "Security Context"
subquestions:
- variable: securityContext.fsGroup
default: 10001
required: false
type: int
label: fsGroup
description: "fsGroup"
- variable: securityContext.fsGroupChangePolicy
default: "OnRootMismatch"
required: false
type: string
label: fsGroupChangePolicy
description: "fsGroupChangePolicy"
- variable: securityContext.runAsUser
default: 10001
required: false
type: int
label: runAsUser
description: "runAsUser"
- variable: securityContext.runAsGroup
default: 10001
required: false
type: int
label: runAsGroup
description: "runAsGroup"
- variable: securityContext.runAsNonRoot
default: true
required: false
type: boolean
label: runAsNonRoot
description: "runAsNonRoot"

View File

@ -0,0 +1,14 @@
{{/*
The usage of helm upgrade [RELEASE] [CHART] --reuse-values --set [variable]:[value] throws an
error in the event that new entries are inserted to the values chart.
This is because reuse-values flag uses the values from the last release. If --set (/--set-file/
--set-string/--values/-f) is applied with the reuse-values flag, the values from the last
release are overridden for those variables alone, and newer changes to the chart are
unacknowledged.
https://medium.com/@kcatstack/understand-helm-upgrade-flags-reset-values-reuse-values-6e58ac8f127e
To prevent errors while applying upgrade with --reuse-values and --set flags after introducing
new variables, default values can be specified in this file.
*/}}

View File

@ -0,0 +1,237 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
Uses .Values.nameOverride when provided, otherwise .Chart.Name; the result
is truncated to 63 characters (Kubernetes DNS label limit) and any trailing
"-" left behind by the truncation is removed.
*/}}
{{- define "yugaware.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Resolution order:
  1. .Values.fullnameOverride, when set;
  2. .Release.Name alone, when it already contains the chart name;
  3. "<release-name>-<chart-name>" otherwise.
*/}}
{{- define "yugaware.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
NOTE(review): despite the description above, only the chart *name* is
emitted ("%s" is applied to .Chart.Name alone); the version is not
included. Any "+" would be replaced with "_" since "+" is not a valid
character in Kubernetes label values.
*/}}
{{- define "yugaware.chart" -}}
{{- printf "%s" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Implements customization for the registry for component images.
The preference is to use the image.commonRegistry field first if it is set.
Otherwise the local registry override for each image is used if set, for ex: image.postgres.registry
In both cases, the image name and tag can be customized by using the overrides for each image, for ex: image.postgres.name
Expects a dict argument: (dict "root" $ "containerName" "<key under .Values.image>").
Emits "<registry>/<name>:<tag>"; the registry prefix (and its trailing "/")
is omitted entirely when no registry is configured.
*/}}
{{- define "full_image" -}}
{{- $specific_registry := (get (get .root.Values.image .containerName) "registry") -}}
{{- if not (empty .root.Values.image.commonRegistry) -}}
{{- $specific_registry = .root.Values.image.commonRegistry -}}
{{- end -}}
{{- if not (empty $specific_registry) -}}
{{- $specific_registry = printf "%s/" $specific_registry -}}
{{- end -}}
{{- $specific_name := (toString (get (get .root.Values.image .containerName) "name")) -}}
{{- $specific_tag := (toString (get (get .root.Values.image .containerName) "tag")) -}}
{{- printf "%s%s:%s" $specific_registry $specific_name $specific_tag -}}
{{- end -}}
{{/*
Implements customization for the registry for the yugaware docker image.
The preference is to use the image.commonRegistry field first if it is set.
Otherwise the image.repository field is used.
In both cases, image.tag can be used to customize the tag of the yugaware image.
When commonRegistry is set, the repository is fixed to
"<commonRegistry>/yugabyte/yugaware" and image.repository is ignored.
Emits "<repository>:<tag>".
*/}}
{{- define "full_yugaware_image" -}}
{{- $specific_registry := .Values.image.repository -}}
{{- if not (empty .Values.image.commonRegistry) -}}
{{- $specific_registry = printf "%s/%s" .Values.image.commonRegistry "yugabyte/yugaware" -}}
{{- end -}}
{{- $specific_tag := (toString .Values.image.tag) -}}
{{- printf "%s:%s" $specific_registry $specific_tag -}}
{{- end -}}
{{/*
Get or generate PG password
Source - https://github.com/helm/charts/issues/5167#issuecomment-843962731
Expects: (dict "Kind" ... "Namespace" ... "Name" ... "Key" ... "Length" ...);
Length defaults to 8.
Reuses the value already stored in the live object (via lookup) so that
upgrades do not rotate the password; otherwise generates a random
alphanumeric value, base64-encoded when Kind is a Secret (Secret .data is
base64). NOTE(review): `lookup` returns empty during `helm template` and
--dry-run, so a fresh value is generated on every such render.
*/}}
{{- define "getOrGeneratePassword" }}
{{- $len := (default 8 .Length) | int -}}
{{- $obj := (lookup "v1" .Kind .Namespace .Name).data -}}
{{- if $obj }}
{{- index $obj .Key -}}
{{- else if (eq (lower .Kind) "secret") -}}
{{- randAlphaNum $len | b64enc -}}
{{- else -}}
{{- randAlphaNum $len -}}
{{- end -}}
{{- end -}}
{{/*
Similar to getOrGeneratePassword but written for migration from
ConfigMap to Secret. Secret is given precedence, and then the upgrade
case of ConfigMap to Secret is handled.
TODO: remove this after few releases i.e. once all old platform
installations are upgraded, and use getOrGeneratePassword.
Expects: (dict "Namespace" ... "Name" ... "Key" ... "Length" ...).
Precedence: existing Secret value (already base64) > existing ConfigMap
value (base64-encoded here) > freshly generated random value (base64).
*/}}
{{- define "getOrGeneratePasswordConfigMapToSecret" }}
{{- $len := (default 8 .Length) | int -}}
{{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}}
{{- if $obj }}
{{- index $obj .Key -}}
{{- else -}}
{{- $obj := (lookup "v1" "ConfigMap" .Namespace .Name).data -}}
{{- if $obj }}
{{- index $obj .Key | b64enc -}}
{{- else -}}
{{- randAlphaNum $len | b64enc -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Make list of allowed CORS origins
Renders a JSON-style array: every entry of
.Values.yugaware.additionAllowedCorsOrigins (quoted, comma-terminated)
followed by the platform's own origin, built from .Values.tls.hostname
with the scheme chosen by .Values.tls.enabled.
NOTE(review): tls.hostname supplies the origin host even when TLS is
disabled — confirm that is the intended hostname source.
*/}}
{{- define "allowedCorsOrigins" -}}
[
{{- range .Values.yugaware.additionAllowedCorsOrigins -}}
{{- . | quote }},
{{- end -}}
{{- if .Values.tls.enabled -}}
"https://{{ .Values.tls.hostname }}"
{{- else -}}
"http://{{ .Values.tls.hostname }}"
{{- end -}}
]
{{- end -}}
{{/*
Get or generate server cert and key
Expects: (dict "Root" $ "Namespace" ... "Name" ...).
Emits "server.key"/"server.crt" lines with base64 values. Precedence:
user-supplied tls.certificate/tls.key (assumed already base64), then the
existing Secret looked up by Namespace/Name (so upgrades keep the same
cert), then a newly generated self-signed certificate for tls.hostname
valid for 3560 days.
*/}}
{{- define "getOrCreateServerCert" -}}
{{- $root := .Root -}}
{{- if and $root.Values.tls.certificate $root.Values.tls.key -}}
server.key: {{ $root.Values.tls.key }}
server.crt: {{ $root.Values.tls.certificate }}
{{- else -}}
{{- $result := (lookup "v1" "Secret" .Namespace .Name).data -}}
{{- if $result -}}
server.key: {{ index $result "server.key" }}
server.crt: {{ index $result "server.crt" }}
{{- else -}}
{{- $cert := genSelfSignedCert $root.Values.tls.hostname nil nil 3560 -}}
server.key: {{ $cert.Key | b64enc }}
server.crt: {{ $cert.Cert | b64enc }}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Get or generate server key cert in pem format
Expects: (dict "Root" $ "Namespace" ... "Name" ...).
Emits a single "server.pem" line whose value is base64 of
"<key>\n<certificate>". Same precedence as getOrCreateServerCert:
user-supplied tls.key/tls.certificate (decoded, concatenated, re-encoded),
then the existing Secret's server.pem, then a freshly generated
self-signed certificate for tls.hostname valid for 3560 days.
*/}}
{{- define "getOrCreateServerPem" -}}
{{- $root := .Root -}}
{{- if and $root.Values.tls.certificate $root.Values.tls.key -}}
{{- $decodedKey := $root.Values.tls.key | b64dec -}}
{{- $decodedCert := $root.Values.tls.certificate | b64dec -}}
{{- $serverPemContentTemp := ( printf "%s\n%s" $decodedKey $decodedCert ) -}}
{{- $serverPemContent := $serverPemContentTemp | b64enc -}}
server.pem: {{ $serverPemContent }}
{{- else -}}
{{- $result := (lookup "v1" "Secret" .Namespace .Name).data -}}
{{- if $result -}}
{{- $serverPemContent := ( index $result "server.pem" ) -}}
server.pem: {{ $serverPemContent }}
{{- else -}}
{{- $cert := genSelfSignedCert $root.Values.tls.hostname nil nil 3560 -}}
{{- $serverPemContentTemp := ( printf "%s\n%s" $cert.Key $cert.Cert ) -}}
{{- $serverPemContent := $serverPemContentTemp | b64enc -}}
server.pem: {{ $serverPemContent }}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Check export of nss_wrapper environment variables required
Prints "true" when the security context is enabled with a runAsUser other
than 0 (root) or 10001, and "false" when the security context is disabled.
NOTE(review): when enabled with runAsUser 0 or 10001 this emits nothing
(empty string) rather than "false" — callers comparing against "true"
behave the same either way, but the output is asymmetric.
*/}}
{{- define "checkNssWrapperExportRequired" -}}
{{- if .Values.securityContext.enabled -}}
{{- if and (ne (int .Values.securityContext.runAsUser) 0) (ne (int .Values.securityContext.runAsUser) 10001) -}}
{{- printf "true" -}}
{{- end -}}
{{- else -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
{{/*
Verify the extraVolumes and extraVolumeMounts mappings.
Every extraVolumes should have extraVolumeMounts
Expects: (dict "extraVolumes" <list> "extraVolumeMounts" <list>).
Fails the render with a descriptive message when either list is non-empty
while the other is empty, or when the two lists' .name entries do not
match one-to-one (checked in both directions). Produces no output on
success.
*/}}
{{- define "yugaware.isExtraVolumesMappingExists" -}}
{{- $lenExtraVolumes := len .extraVolumes -}}
{{- $lenExtraVolumeMounts := len .extraVolumeMounts -}}
{{- if and (eq $lenExtraVolumeMounts 0) (gt $lenExtraVolumes 0) -}}
{{- fail "You have not provided the extraVolumeMounts for extraVolumes." -}}
{{- else if and (eq $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
{{- fail "You have not provided the extraVolumes for extraVolumeMounts." -}}
{{- else if and (gt $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
{{- $volumeMountsList := list -}}
{{- range .extraVolumeMounts -}}
{{- $volumeMountsList = append $volumeMountsList .name -}}
{{- end -}}
{{- $volumesList := list -}}
{{- range .extraVolumes -}}
{{- $volumesList = append $volumesList .name -}}
{{- end -}}
{{- range $volumesList -}}
{{- if not (has . $volumeMountsList) -}}
{{- fail (printf "You have not provided the extraVolumeMounts for extraVolume %s" .) -}}
{{- end -}}
{{- end -}}
{{- range $volumeMountsList -}}
{{- if not (has . $volumesList) -}}
{{- fail (printf "You have not provided the extraVolumes for extraVolumeMounts %s" .) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Make list of custom http headers
Renders .Values.yugaware.custom_headers as a JSON-style array of quoted
strings; the index comparison only exists to avoid a trailing comma after
the final element.
*/}}
{{- define "customHeaders" -}}
[
{{- $headers := .Values.yugaware.custom_headers -}}
{{- range $index, $element := $headers -}}
{{- if ne $index (sub (len $headers) 1) -}}
{{- . | quote }},
{{- else -}}
{{- . | quote }}
{{- end -}}
{{- end -}}
]
{{- end -}}

View File

@ -0,0 +1,99 @@
# Copyright (c) YugaByte, Inc.
{{- $root := . }}
{{- $tls := $root.Values.tls }}
{{- if and $tls.enabled $tls.certManager.enabled }}
{{- if $tls.certManager.genSelfsigned }}
{{- if $tls.certManager.useClusterIssuer }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: {{ $root.Release.Name }}-yugaware-cluster-issuer
spec:
selfSigned: {}
{{- else }} # useClusterIssuer=false
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ $root.Release.Name }}-yugaware-issuer
namespace: {{ $root.Release.Namespace }}
spec:
selfSigned: {}
---
{{- end }} # useClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ $root.Release.Name }}-yugaware-ui-root-ca
namespace: {{ $root.Release.Namespace }}
spec:
isCA: true
commonName: Yugaware self signed CA
secretName: {{ .Release.Name }}-yugaware-root-ca
secretTemplate:
labels:
app: "{{ template "yugaware.name" . }}"
chart: "{{ template "yugaware.chart" . }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
duration: {{ $tls.certManager.configuration.duration | quote }}
renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }}
privateKey:
algorithm: {{ $tls.certManager.configuration.algorithm | quote }}
encoding: PKCS8
size: {{ $tls.certManager.configuration.keySize }}
rotationPolicy: Always
issuerRef:
{{- if $tls.certManager.useClusterIssuer }}
name: {{ $root.Release.Name }}-yugaware-cluster-issuer
kind: ClusterIssuer
{{- else }}
name: {{ $root.Release.Name }}-yugaware-issuer
kind: Issuer
{{- end }}
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ $root.Release.Name }}-yugaware-ca-issuer
namespace: {{ $root.Release.Namespace }}
spec:
ca:
secretName: {{ .Release.Name }}-yugaware-root-ca
---
{{- end }} # genSelfsigned
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ $root.Release.Name }}-yugaware-ui-tls
namespace: {{ $root.Release.Namespace }}
spec:
isCA: false
commonName: {{ $tls.hostname }}
secretName: {{ .Release.Name }}-yugaware-tls-cert
secretTemplate:
labels:
app: "{{ template "yugaware.name" . }}"
chart: "{{ template "yugaware.chart" . }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
duration: {{ $tls.certManager.configuration.duration | quote }}
renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }}
privateKey:
algorithm: {{ $tls.certManager.configuration.algorithm | quote }}
encoding: PKCS8
size: {{ $tls.certManager.configuration.keySize }}
rotationPolicy: Always
issuerRef:
name: {{ $tls.certManager.genSelfsigned | ternary (printf "%s%s" $root.Release.Name "-yugaware-ca-issuer") ($tls.certManager.useClusterIssuer | ternary $tls.certManager.clusterIssuer $tls.certManager.issuer) }}
{{- if $tls.certManager.useClusterIssuer }}
kind: ClusterIssuer
{{- else }}
kind: Issuer
{{- end }}
---
{{- end }}

View File

@ -0,0 +1,578 @@
# Copyright (c) YugaByte, Inc.
{{- if .Values.image.pullSecretFile }}
---
apiVersion: v1
data:
.dockerconfigjson: {{ $.Files.Get .Values.image.pullSecretFile | b64enc }}
kind: Secret
metadata:
name: {{ .Values.image.pullSecret }}
type: kubernetes.io/dockerconfigjson
{{- end }}
{{- /*
  Main YBA application configuration.
  Renders application.docker.conf (Play/HOCON format) into a ConfigMap that is
  projected into the yugaware container. ${VAR} references are HOCON
  substitutions resolved at application start (from env vars injected out of
  the -yugaware-global-config Secret), NOT Helm template expressions.
*/}}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-yugaware-app-config
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
  application.docker.conf: |
    include classpath("application.common.conf")
    play.crypto.secret=${APP_SECRET}
    play.i18n.langs = [ "en" ]
    pidfile.path = "/dev/null"
    play.logger.includeConfigProperties=true
    log.override.path = "/opt/yugabyte/yugaware/data/logs"
    {{- /* DB host selection: external postgres wins; otherwise the sidecar
       postgres on loopback (v6 loopback when running IPv6-only). */}}
    db {
      default.dbname=${POSTGRES_DB}
      {{ if .Values.postgres.external.host }}
      default.host="{{ .Values.postgres.external.host }}"
      default.port={{ .Values.postgres.external.port }}
      {{ else if eq .Values.ip_version_support "v6_only" }}
      default.host="[::1]"
      {{ else }}
      default.host="127.0.0.1"
      {{ end }}
      default.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params}
      default.params="{{ .Values.jdbcParams }}"
      default.username=${POSTGRES_USER}
      default.password=${POSTGRES_PASSWORD}
      {{ if .Values.yugaware.cloud.enabled }}
      perf_advisor.driver="org.hsqldb.jdbc.JDBCDriver"
      perf_advisor.url="jdbc:hsqldb:mem:perf-advisor"
      perf_advisor.createDatabaseIfMissing=false
      perf_advisor.username="sa"
      perf_advisor.password="sa"
      perf_advisor.migration.auto=false
      perf_advisor.migration.disabled=true
      {{ else }}
      perf_advisor.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.perf_advisor.dbname}${db.default.params}
      perf_advisor.createDatabaseUrl="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params}
      {{ end }}
    }
    {{- if .Values.tls.enabled }}
    https.port = 9443
    play.server.https.keyStore {
      path = /opt/certs/server.pem
      type = PEM
    }
    {{- end }}
    yb {
      {{- if .Values.yugaware.universe_boot_script }}
      universe_boot_script = "/data/universe-boot-script.sh"
      {{- end }}
      cloud.enabled = {{ .Values.yugaware.cloud.enabled }}
      cloud.requestIdHeader = "{{ .Values.yugaware.cloud.requestIdHeader }}"
      devops.home = /opt/yugabyte/devops
      metrics.host = "{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}"
      metrics.url = "http://"${yb.metrics.host}":9090/api/v1"
      metrics.management.url = "http://"${yb.metrics.host}":9090/-"
      storage.path = /opt/yugabyte/yugaware/data
      docker.network = bridge
      seedData = false
      swamper.targetPath = /opt/yugabyte/prometheus/targets
      swamper.rulesPath = /opt/yugabyte/prometheus/rules
      security.enable_auth_for_proxy_metrics = {{ .Values.yugaware.enableProxyMetricsAuth }}
      proxy_endpoint_timeout = {{ .Values.yugaware.proxyEndpointTimeoutMs }}
      multiTenant = {{ .Values.yugaware.multiTenant }}
      releases.path = "/opt/yugabyte/releases"
      docker.release = "/opt/yugabyte/release"
      # TODO(bogdan): need this extra level for installing from local...
      thirdparty.packagePath = /opt/third-party
      helm.packagePath = "{{ .Values.helm.packagePath }}"
      helm.timeout_secs = {{ .Values.helm.timeout }}
      health.check_interval_ms = 300000
      health.status_interval_ms = 43200000
      health.default_email = "{{ .Values.yugaware.health.email }}"
      health.ses_email_username = "{{ .Values.yugaware.health.username }}"
      health.ses_email_password = "{{ .Values.yugaware.health.password }}"
      kubernetes.storageClass = "{{ .Values.yugaware.storageClass }}"
      kubernetes.pullSecretName = "{{ .Values.image.pullSecret }}"
      url = "https://{{ .Values.tls.hostname }}"
      # GKE MCS takes 7 to 10 minutes to setup DNS
      wait_for_server_timeout = 15 minutes
      {{- if .Values.tls.enabled }}
      security.headers.hsts_enabled = true
      {{- end }}
      security.headers.custom_headers = {{ include "customHeaders" . }}
    }
    play.filters {
      # CSRF config
      csrf {
        cookie {
          # If non null, the CSRF token will be placed in a cookie with this name
          name = "csrfCookie"
          # Whether the cookie should be set to secure
          secure = false
          # Whether the cookie should have the HTTP only flag set
          httpOnly = false
        }
        # Whether to bypass CSRF check if CORS check is satisfied
        bypassCorsTrustedOrigins = false
        header {
          # The name of the header to accept CSRF tokens from.
          name = "Csrf-Token"
        }
      }
      # CORS config
      cors {
        pathPrefixes = ["/"]
        allowedOrigins = {{ include "allowedCorsOrigins" . }}
        # Server allows cookies/credentials to be sent with cross-origin requests
        supportsCredentials=true
        allowedHttpMethods = ["GET", "POST", "PUT", "OPTIONS", "DELETE"]
        allowedHttpHeaders = ["Accept", "Origin", "Content-Type", "X-Auth-Token", "X-AUTH-YW-API-TOKEN", "{{ .Values.yugaware.cloud.requestIdHeader }}", ${play.filters.csrf.header.name}]
      }
    }
    # string config entries from helm values additionalAppConf
    {{- range $key, $value := .Values.additionalAppConf.stringConf }}
    {{ $key }} = "{{ $value }}"
    {{- end }}
    # boolean/int config entries from helm values additionalAppConf
    {{- range $key, $value := .Values.additionalAppConf.nonStringConf }}
    {{ $key }} = {{ $value }}
    {{- end }}
{{- /*
  Server TLS Secret (only when TLS is on and cert-manager is NOT managing it).
  The getOrCreateServerPem helper reuses the server.pem from an existing
  Secret of the same name if present, so upgrades do not rotate the cert.
*/}}
{{- if and .Values.tls.enabled (not .Values.tls.certManager.enabled) }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-yugaware-tls-pem
  labels:
    app: "{{ template "yugaware.name" . }}"
    chart: "{{ template "yugaware.chart" . }}"
    release: {{ .Release.Name | quote }}
    heritage: {{ .Release.Service | quote }}
type: Opaque
data:
  {{- include "getOrCreateServerPem" (dict "Namespace" .Release.Namespace "Root" . "Name" (printf "%s%s" .Release.Name "-yugaware-tls-pem")) | nindent 2 }}
{{- end }}
---
{{- /*
  PG 11 -> 14 data migration script, run by the postgres-upgrade init
  container. Skipped for the registry.redhat.io postgres image in OCP
  compatibility mode (that image ships no docker-upgrade tooling).
  The upgrade only runs when the new data dir is empty AND the old one
  is non-empty, i.e. exactly once per installation.
*/}}
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-yugaware-pg-upgrade
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
  pg-upgrade-11-to-14.sh: |
    #!/bin/bash
    set -x -o errexit
    cd /pg_upgrade_logs/
    if [ ! "$(ls -A ${PGDATANEW})" ] && [ "$(ls -A ${PGDATAOLD})" ];
    then
      echo "Upgrading PG data from ${PGDATAOLD} to ${PGDATANEW}"
      # if fsGroup is set, we need to remove the sticky bit, and group
      # write permission from the directories
      chmod -R g-w-s "${PGDATAOLD}"
      chmod g-w-s "${PGDATAOLD}"
      docker-upgrade pg_upgrade | tee -a /pg_upgrade_logs/pg_upgrade_11_to_14.log;
      echo "host all all all scram-sha-256" >> "${PGDATANEW}/pg_hba.conf";
    fi
{{- end }}
{{- /*
  Pre-run script for the postgres-init init container (securityContext mode
  only): creates PGDATA and chowns it to the configured UID/GID so the
  postgres container can run as a non-root user.
*/}}
{{- if .Values.securityContext.enabled }}
---
apiVersion: "v1"
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-yugaware-pg-prerun
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
  pg-prerun.sh: |
    #!/bin/bash
    set -x -o errexit
    mkdir -p $PGDATA && chown -R $PG_UID:$PG_GID $PGDATA;
{{- end }}
{{- /*
  TLS material for Prometheus remote_write, supplied verbatim from values.
  Values must already be base64-encoded by the user (they are emitted into
  Secret .data without b64enc). Each of ca/cert/key is optional.
*/}}
{{- if .Values.prometheus.remoteWrite.tls.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name | quote }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
type: Opaque
data:
  # For user-provided remote write ca cert, cert and key. Expect to be base-64 encoded.
  {{- if .Values.prometheus.remoteWrite.tls.caCert }}
  ca.crt: {{ .Values.prometheus.remoteWrite.tls.caCert }}
  {{- end }}
  {{- if .Values.prometheus.remoteWrite.tls.clientCert }}
  client.crt: {{ .Values.prometheus.remoteWrite.tls.clientCert }}
  {{- end }}
  {{- if .Values.prometheus.remoteWrite.tls.clientKey }}
  client.key: {{ .Values.prometheus.remoteWrite.tls.clientKey }}
  {{- end }}
{{- end}}
---
{{- /*
  Bundled Prometheus configuration. An init container copies prometheus.yml
  from this ConfigMap into the shared yugaware-storage volume.
  Scrape targets: OCP federation OR kubelet/KSM/cadvisor (mutually exclusive
  via ocpCompatibility), plus the platform itself, node-agent, node_exporter
  and YugabyteDB servers (the last three via file_sd target files that YBA
  writes under yb.swamper.targetPath).
*/}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-yugaware-prometheus-config
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
  prometheus.yml: |
    global:
      scrape_interval: 10s
      evaluation_interval: 10s
    rule_files:
      - '/opt/yugabyte/prometheus/rules/yugaware.ad.*.yml'
      - '/opt/yugabyte/prometheus/rules/yugaware.recording-rules.yml'
    {{- if .Values.prometheus.remoteWrite.config }}
    remote_write:
{{ toYaml .Values.prometheus.remoteWrite.config | indent 6}}
    {{- end }}
    scrape_configs:
    {{- if .Values.ocpCompatibility.enabled }}
      - job_name: "ocp-prometheus-federated"
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          insecure_skip_verify: true
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        honor_labels: true
        metrics_path: "/federate"
        params:
          'match[]':
            # kubelet metrics
            - 'kubelet_volume_stats_used_bytes{persistentvolumeclaim=~"(.*)-yb-(.*)"}'
            - 'kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=~"(.*)-yb-(.*)"}'
            # kubelet cadvisor metrics
            - 'container_cpu_usage_seconds_total{pod=~"(.*)yb-(.*)"}'
            - 'container_memory_working_set_bytes{pod=~"(.*)yb-(.*)"}'
            # kube-state-metrics
            # Supports >= OCP v4.4
            # OCP v4.4 has upgraded the KSM from 1.8.0 to 1.9.5.
            # https://docs.openshift.com/container-platform/4.4/release_notes/ocp-4-4-release-notes.html#ocp-4-4-cluster-monitoring-version-updates
            # - 'kube_pod_container_resource_requests_cpu_cores{pod=~"(.*)yb-(.*)"}'
            - 'kube_pod_container_resource_requests{pod=~"(.*)yb-(.*)", unit="core"}'
        static_configs:
          - targets:
            - "prometheus-k8s.openshift-monitoring.svc:9091"
        metric_relabel_configs:
          # Save the name of the metric so we can group_by since we cannot by __name__ directly...
          - source_labels: ["__name__"]
            regex: "(.*)"
            target_label: "saved_name"
            replacement: "$1"
          - source_labels: ["pod"]
            regex: "(.*)"
            target_label: "pod_name"
            replacement: "$1"
          - source_labels: ["container"]
            regex: "(.*)"
            target_label: "container_name"
            replacement: "$1"
          # rename new name of the CPU metric to the old name and label
          # ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
          - source_labels: ["__name__", "unit"]
            regex: "kube_pod_container_resource_requests;core"
            target_label: "__name__"
            replacement: "kube_pod_container_resource_requests_cpu_cores"
    {{- else }}
    {{- if .Values.prometheus.scrapeKubernetesNodes }}
      - job_name: 'kubernetes-nodes'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
          - role: node
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/${1}/proxy/metrics
        metric_relabel_configs:
          # Only keep the metrics which we care about
          - source_labels: ["__name__"]
            regex: "kubelet_volume_stats_used_bytes|kubelet_volume_stats_capacity_bytes"
            action: keep
          - source_labels: ["persistentvolumeclaim"]
            regex: "(.*)-yb-(.*)"
            action: keep
          # Save the name of the metric so we can group_by since we cannot by __name__ directly...
          - source_labels: ["__name__"]
            regex: "(.*)"
            target_label: "saved_name"
            replacement: "$1"
          - source_labels: ["pod"]
            regex: "(.*)"
            target_label: "pod_name"
            replacement: "$1"
          - source_labels: ["container"]
            regex: "(.*)"
            target_label: "container_name"
            replacement: "$1"
      - job_name: 'kube-state-metrics'
        static_configs:
          - targets: ['kube-state-metrics.kube-system.svc.{{.Values.domainName}}:8080']
        metric_relabel_configs:
          # Only keep the metrics which we care about
          - source_labels: ["__name__", "unit"]
            regex: "kube_pod_container_resource_requests;core"
            action: keep
          # Save the name of the metric so we can group_by since we cannot by __name__ directly...
          - source_labels: ["__name__"]
            regex: "(.*)"
            target_label: "saved_name"
            replacement: "$1"
          - source_labels: ["pod"]
            regex: "(.*)"
            target_label: "pod_name"
            replacement: "$1"
          - source_labels: ["container"]
            regex: "(.*)"
            target_label: "container_name"
            replacement: "$1"
          # Keep metrics from YugabyteDB pods, discard everything else
          - source_labels: ["pod_name"]
            regex: "(.*)yb-(.*)"
            action: keep
          # rename new name of the CPU metric to the old name and label
          # ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
          - source_labels: ["__name__", "unit"]
            regex: "kube_pod_container_resource_requests;core"
            target_label: "__name__"
            replacement: "kube_pod_container_resource_requests_cpu_cores"
          # Keep metrics for CPU, discard duplicate metrics
          - source_labels: ["__name__"]
            regex: "kube_pod_container_resource_requests_cpu_cores"
            action: keep
      - job_name: 'kubernetes-cadvisor'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
          - role: node
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_node_label_(.+)
          - target_label: __address__
            replacement: kubernetes.default.svc:443
          - source_labels: [__meta_kubernetes_node_name]
            regex: (.+)
            target_label: __metrics_path__
            replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
        metric_relabel_configs:
          # Only keep the metrics which we care about
          - source_labels: ["__name__"]
            regex: "container_cpu_usage_seconds_total|container_memory_working_set_bytes"
            action: keep
          # Save the name of the metric so we can group_by since we cannot by __name__ directly...
          - source_labels: ["__name__"]
            regex: "(.*)"
            target_label: "saved_name"
            replacement: "$1"
          - source_labels: ["pod"]
            regex: "(.*)"
            target_label: "pod_name"
            replacement: "$1"
          - source_labels: ["container"]
            regex: "(.*)"
            target_label: "container_name"
            replacement: "$1"
          # Keep metrics from YugabyteDB pods, discard everything else
          - source_labels: ["pod_name"]
            regex: "(.*)yb-(.*)"
            action: keep
    {{- end }}
    {{- end }}
    {{- if .Values.tls.enabled }}
      - job_name: 'platform'
        metrics_path: "/api/v1/prometheus_metrics"
        scheme: https
        tls_config:
          insecure_skip_verify: true
        static_configs:
          - targets: [
            '{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9443'
          ]
    {{- else }}
      - job_name: 'platform'
        metrics_path: "/api/v1/prometheus_metrics"
        static_configs:
          - targets: [
            '{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9000'
          ]
    {{- end }}
      - job_name: 'node-agent'
        metrics_path: "/metrics"
        file_sd_configs:
          - files:
            - '/opt/yugabyte/prometheus/targets/node-agent.*.json'
      - job_name: "node"
        file_sd_configs:
          - files:
            - '/opt/yugabyte/prometheus/targets/node.*.json'
        metric_relabel_configs:
          # Below relabels are required for smooth migration from node_exporter 0.13.0 to the latest
          - source_labels: ["__name__"]
            regex: "node_cpu"
            target_label: "__name__"
            replacement: "node_cpu_seconds_total"
          - source_labels: ["__name__"]
            regex: "node_filesystem_free"
            target_label: "__name__"
            replacement: "node_filesystem_free_bytes"
          - source_labels: ["__name__"]
            regex: "node_filesystem_size"
            target_label: "__name__"
            replacement: "node_filesystem_size_bytes"
          - source_labels: ["__name__"]
            regex: "node_disk_reads_completed"
            target_label: "__name__"
            replacement: "node_disk_reads_completed_total"
          - source_labels: ["__name__"]
            regex: "node_disk_writes_completed"
            target_label: "__name__"
            replacement: "node_disk_writes_completed_total"
          - source_labels: ["__name__"]
            regex: "node_memory_MemTotal"
            target_label: "__name__"
            replacement: "node_memory_MemTotal_bytes"
          - source_labels: ["__name__"]
            regex: "node_memory_Slab"
            target_label: "__name__"
            replacement: "node_memory_Slab_bytes"
          - source_labels: ["__name__"]
            regex: "node_memory_Cached"
            target_label: "__name__"
            replacement: "node_memory_Cached_bytes"
          - source_labels: ["__name__"]
            regex: "node_memory_Buffers"
            target_label: "__name__"
            replacement: "node_memory_Buffers_bytes"
          - source_labels: ["__name__"]
            regex: "node_memory_MemFree"
            target_label: "__name__"
            replacement: "node_memory_MemFree_bytes"
          - source_labels: ["__name__"]
            regex: "node_network_receive_bytes"
            target_label: "__name__"
            replacement: "node_network_receive_bytes_total"
          - source_labels: ["__name__"]
            regex: "node_network_transmit_bytes"
            target_label: "__name__"
            replacement: "node_network_transmit_bytes_total"
          - source_labels: ["__name__"]
            regex: "node_network_receive_packets"
            target_label: "__name__"
            replacement: "node_network_receive_packets_total"
          - source_labels: ["__name__"]
            regex: "node_network_transmit_packets"
            target_label: "__name__"
            replacement: "node_network_transmit_packets_total"
          - source_labels: ["__name__"]
            regex: "node_network_receive_errs"
            target_label: "__name__"
            replacement: "node_network_receive_errs_total"
          - source_labels: ["__name__"]
            regex: "node_network_transmit_errs"
            target_label: "__name__"
            replacement: "node_network_transmit_errs_total"
          - source_labels: ["__name__"]
            regex: "node_disk_bytes_read"
            target_label: "__name__"
            replacement: "node_disk_read_bytes_total"
          - source_labels: ["__name__"]
            regex: "node_disk_bytes_written"
            target_label: "__name__"
            replacement: "node_disk_written_bytes_total"
          # Save the name of the metric so we can group_by since we cannot by __name__ directly...
          - source_labels: ["__name__"]
            regex: "(.*)"
            target_label: "saved_name"
            replacement: "$1"
      - job_name: "yugabyte"
        tls_config:
          insecure_skip_verify: true
        metrics_path: "/prometheus-metrics"
        file_sd_configs:
          - files:
            - '/opt/yugabyte/prometheus/targets/yugabyte.*.json'
        metric_relabel_configs:
          # Save the name of the metric so we can group_by since we cannot by __name__ directly...
          - source_labels: ["__name__"]
            regex: "(.*)"
            target_label: "saved_name"
            replacement: "$1"
          # The following basically retrofit the handler_latency_* metrics to label format.
          - source_labels: ["__name__"]
            regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)"
            target_label: "server_type"
            replacement: "$1"
          - source_labels: ["__name__"]
            regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)"
            target_label: "service_type"
            replacement: "$2"
          - source_labels: ["__name__"]
            regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?"
            target_label: "service_method"
            replacement: "$3"
          - source_labels: ["__name__"]
            regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?"
            target_label: "__name__"
            replacement: "rpc_latency$4"

View File

@ -0,0 +1,23 @@
# Copyright (c) YugaByte, Inc.
{{- /*
  Global credentials Secret consumed as env vars by the yugaware and postgres
  containers. External postgres credentials win over the embedded ones.
  getOrGeneratePasswordConfigMapToSecret preserves the generated postgres
  password across upgrades (looked up from the existing object).
  NOTE(review): app_secret is double-b64enc'd (the app appears to expect a
  base64 value after Secret decoding — confirm against YBA) and is
  re-generated by randAlphaNum on every helm upgrade; verify that is intended.
*/}}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-yugaware-global-config
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
  {{- if .Values.postgres.external.host }}
  postgres_db: {{ .Values.postgres.external.dbname | b64enc | quote }}
  postgres_user: {{ .Values.postgres.external.user | b64enc | quote }}
  postgres_password: {{ .Values.postgres.external.pass | b64enc | quote }}
  {{- else }}
  postgres_db: {{ .Values.postgres.dbname | b64enc | quote }}
  postgres_user: {{ .Values.postgres.user | b64enc | quote }}
  postgres_password: {{ include "getOrGeneratePasswordConfigMapToSecret" (dict "Namespace" .Release.Namespace "Name" (printf "%s%s" .Release.Name "-yugaware-global-config") "Key" "postgres_password") | quote }}
  {{- end }}
  app_secret: {{ randAlphaNum 64 | b64enc | b64enc | quote }}

View File

@ -0,0 +1,49 @@
{{- /*
  Init-container script (securityContext mode only): tightens permissions of
  universe key .pem files to 0400. It rebuilds the keys directory via a copy
  + rename (keeping a timestamped backup of the old directory) instead of
  chmod-ing in place.
*/}}
{{- if .Values.securityContext.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-yugaware-init
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
  init-permissions.sh: |
    #!/bin/bash
    set -xe -o pipefail
    data_directory="/opt/yugabyte/yugaware/data"
    if [[ -d "${data_directory}/keys/" ]]; then
      pemfiles=$(find "${data_directory}/keys/" -name "*.pem" -exec stat -c "%a" {} + | uniq | tr '\n' ',')
      IFS="," read -r -a pemfile_perms <<< "${pemfiles}"
      trigger=false
      echo "Finding pem files with permissions different than 400, and setting their permissions to 400."
      for pemfile in "${pemfile_perms[@]}"; do
        if [[ "${pemfile}" != *400* ]]; then
          echo "Found a pem file with permissions ${pemfile}"
          trigger=true
          break
        fi
      done
      if ${trigger}; then
        echo "Creating copy of data/keys directory"
        cp -r "${data_directory}/keys" "${data_directory}/new_keys"
        echo "Setting permission of all pem files to 400"
        find "${data_directory}/new_keys/" -name "*.pem" -exec chmod 400 {} +
        echo "Renaming existing keys directory"
        mv "${data_directory}/keys" "${data_directory}/keys-$(date +%s)"
        echo "Renaming new keys directory"
        mv "${data_directory}/new_keys" "${data_directory}/keys"
      else
        echo "All pem files already have permission set to 400"
      fi
    fi
{{- end }}

View File

@ -0,0 +1,19 @@
{{/*
TODO: switch to policy/v1 completely when we stop supporting
Kubernetes versions < 1.21
*/}}
{{- /*
  PodDisruptionBudget for the yugaware pod. API version is overridable via
  pdbPolicyVersionOverride, otherwise auto-detected from cluster capabilities.
*/}}
{{- if .Values.pdbPolicyVersionOverride }}
apiVersion: policy/{{ .Values.pdbPolicyVersionOverride }}
{{- else if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ .Release.Name }}-yugaware-pdb
spec:
  maxUnavailable: {{ .Values.yugaware.podDisruptionBudget.maxUnavailable | toJson }}
  selector:
    matchLabels:
      app: {{ .Release.Name }}-yugaware

View File

@ -0,0 +1,218 @@
{{- /*
  ServiceAccount for the yugaware pod, created only when the user has not
  supplied an existing one via yugaware.serviceAccount.
*/}}
{{ if not .Values.yugaware.serviceAccount }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .Release.Name }}
  labels:
    k8s-app: yugaware
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  {{- if .Values.yugaware.serviceAccountAnnotations }}
  annotations:
{{ toYaml .Values.yugaware.serviceAccountAnnotations | indent 4 }}
  {{- end }}
{{ end }}
{{- /*
  RBAC for YBA. On OpenShift only a binding to the built-in
  cluster-monitoring-view ClusterRole is created; elsewhere a full
  ClusterRole (operator CRs, chart install/upgrade, universe management,
  node metrics scraping) plus its binding.
*/}}
{{- if .Values.rbac.create }}
{{- if .Values.ocpCompatibility.enabled }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-cluster-monitoring-view
  labels:
    app: yugaware
subjects:
  - kind: ServiceAccount
    name: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: cluster-monitoring-view
  apiGroup: rbac.authorization.k8s.io
{{- else }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ .Release.Name }}
rules:
# Set of permissions required for operator
- apiGroups:
  - operator.yugabyte.io
  resources:
  - "*"
  verbs:
  - "get"
  - "create"
  - "delete"
  - "patch"
  - "list"
  - "watch"
  - "update"
# Set of permissions required to install, upgrade, delete the yugabyte chart
- apiGroups:
  - "policy"
  resources:
  - "poddisruptionbudgets"
  verbs:
  - "get"
  - "create"
  - "delete"
  - "patch"
- apiGroups:
  - ""
  resources:
  - "services"
  verbs:
  - "get"
  - "delete"
  - "create"
  - "patch"
- apiGroups:
  - "apps"
  resources:
  - "statefulsets"
  verbs:
  - "get"
  - "list"
  - "delete"
  - "create"
  - "patch"
- apiGroups:
  - ""
  resources:
  - "secrets"
  verbs:
  - "create"
  - "list"
  - "get"
  - "delete"
  - "update"
  - "patch"
- apiGroups:
  - "cert-manager.io"
  resources:
  - "certificates"
  verbs:
  - "create"
  - "delete"
  - "get"
  - "patch"
- apiGroups:
  - ""
  resources:
  - "configmaps"
  verbs:
  - "get"
  - "create"
  - "patch"
  - "delete"
# Set of permissions required by YBA to manage YB DB universes
- apiGroups:
  - ""
  resources:
  - "namespaces"
  verbs:
  - "delete"
  - "create"
  - "patch"
  - "get"
  - "list"
- apiGroups:
  - ""
  resources:
  - "pods"
  verbs:
  - "get"
  - "list"
  - "delete"
- apiGroups:
  - ""
  resources:
  - "services"
  verbs:
  - "get"
  - "list"
- apiGroups:
  - ""
  resources:
  - "persistentvolumeclaims"
  verbs:
  - "get"
  - "patch"
  - "list"
  - "delete"
- apiGroups:
  - ""
  resources:
  - "pods/exec"
  verbs:
  - "create"
- apiGroups:
  - "apps"
  resources:
  - "statefulsets/scale"
  verbs:
  - "patch"
- apiGroups:
  - ""
  resources:
  - "events"
  verbs:
  - "list"
# required to scrape resource metrics like CPU, memory, etc.
- apiGroups:
  - ""
  resources:
  - "nodes"
  verbs:
  - "list"
  - "get"
  - "watch"
# required to scrape resource metrics like CPU, memory, etc.
- apiGroups:
  - ""
  resources:
  - "nodes/proxy"
  verbs:
  - "get"
# Ref: https://github.com/yugabyte/charts/commit/4a5319972385666487a7bc2cd0c35052f2cfa4c5
- apiGroups:
  - ""
  resources:
  - "events"
  verbs:
  - "get"
  - "list"
  - "watch"
  - "create"
  - "update"
  - "patch"
  - "delete"
- apiGroups:
  - ""
  resources:
  - "configmaps"
  verbs:
  - "list"
  - "watch"
  - "update"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}
  labels:
    k8s-app: yugaware
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ .Release.Name }}
  apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}

View File

@ -0,0 +1,97 @@
# Copyright (c) YugaByte, Inc.
{{- /*
  UI/API Service for the yugaware pod. Exposes the UI (80 -> 9000), the TLS
  UI when tls.enabled (443 -> 9443) and the bundled Prometheus (9090).
  FIX(review): clusterIP previously rendered the literal text
  ".Values.yugaware.service.clusterIP" because the value reference was not
  wrapped in template braces; the API server rejects such a Service.
*/}}
{{- if .Values.yugaware.service.enabled }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-yugaware-ui
  {{- if .Values.yugaware.service.annotations }}
  annotations:
{{ toYaml .Values.yugaware.service.annotations | indent 4 }}
  {{- end }}
  labels:
    app: {{ .Release.Name }}-yugaware
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
spec:
  {{- if eq .Release.Service "Tiller" }}
  clusterIP:
  {{- else }}
  {{- if .Values.yugaware.service.clusterIP }}
  # Templated (was a literal string before) so the configured IP is rendered.
  clusterIP: "{{ .Values.yugaware.service.clusterIP }}"
  {{- end }}
  {{- end }}
  ports:
    {{- if .Values.tls.enabled }}
    - name: ui-tls
      port: 443
      targetPort: 9443
    {{- end }}
    - name: ui
      port: 80
      targetPort: 9000
    - name: metrics
      port: 9090
  selector:
    app: {{ .Release.Name }}-yugaware
  type: "{{ .Values.yugaware.service.type }}"
  {{- if and (eq .Values.yugaware.service.type "LoadBalancer") (.Values.yugaware.service.ip) }}
  loadBalancerIP: "{{ .Values.yugaware.service.ip }}"
  {{- end }}
  {{- if .Values.yugaware.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
    {{- toYaml .Values.yugaware.service.loadBalancerSourceRanges | nindent 4 }}
  {{- end }}
{{- end }}
{{- /*
  Optional Prometheus-operator ServiceMonitor scraping YBA's own metrics
  endpoint through the ui port.
  FIX(review): the scrape path must be absolute — the chart's own scrape
  configs use "/api/v1/prometheus_metrics"; the missing leading "/" produced
  an invalid metrics_path for Prometheus.
*/}}
{{- if .Values.yugaware.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ .Release.Name }}-yugaware
  {{- if .Values.yugaware.serviceMonitor.annotations }}
  annotations:
{{ toYaml .Values.yugaware.serviceMonitor.annotations | indent 4 }}
  {{- end }}
  labels:
    app: {{ .Release.Name }}-yugaware
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
spec:
  endpoints:
    - port: ui # Scrape the Platform itself instead of bundled Prometheus
      path: /api/v1/prometheus_metrics
  selector:
    matchLabels:
      app: {{ .Release.Name }}-yugaware
{{- end }}
{{- /*
  Optional Service exposing the embedded postgres sidecar (port 5432).
  Selects the yugaware pod itself since postgres runs in the same pod.
*/}}
{{- if .Values.postgres.service.enabled }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-postgres
  {{- if .Values.postgres.service.annotations }}
  annotations:
{{ toYaml .Values.postgres.service.annotations | indent 4 }}
  {{- end }}
  labels:
    app: {{ .Release.Name }}-yugaware
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
spec:
  ports:
    - name: postgres
      port: 5432
      targetPort: 5432
  selector:
    app: {{ .Release.Name }}-yugaware
  type: {{ .Values.postgres.service.type }}
  {{- if and (eq .Values.postgres.service.type "LoadBalancer") (.Values.postgres.service.ip) }}
  loadBalancerIP: "{{ .Values.postgres.service.ip }}"
  {{- end }}
{{- end }}

View File

@ -0,0 +1,456 @@
# Copyright (c) YugaByte, Inc.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-yugaware
labels:
app: {{ .Release.Name }}-yugaware
chart: {{ template "yugaware.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
spec:
serviceName: {{ .Release.Name }}-yugaware
replicas: {{ .Values.yugaware.replicas }}
selector:
matchLabels:
app: {{ .Release.Name }}-yugaware
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configs.yaml") . | sha256sum }}
{{- if .Values.yugaware.pod.annotations }}
{{ toYaml .Values.yugaware.pod.annotations | indent 8 }}
{{- end }}
labels:
app: {{ .Release.Name }}-yugaware
{{- if .Values.yugaware.pod.labels }}
{{ toYaml .Values.yugaware.pod.labels | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
imagePullSecrets:
- name: {{ .Values.image.pullSecret }}
{{- if .Values.securityContext.enabled }}
securityContext:
fsGroup: {{ .Values.securityContext.fsGroup }}
{{- if (semverCompare ">=1.20-x" .Capabilities.KubeVersion.Version) }}
fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }}
{{- end }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8}}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{- with .Values.tolerations }}{{ toYaml . | nindent 8 }}{{ end }}
{{- end }}
{{- if .Values.zoneAffinity }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
{{ toYaml .Values.zoneAffinity | indent 18 }}
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
{{ toYaml .Values.zoneAffinity | indent 18 }}
{{- end }}
volumes:
- name: yugaware-storage
persistentVolumeClaim:
claimName: {{ .Release.Name }}-yugaware-storage
- name: yugaware-ui
emptyDir: {}
- name: yugaware-config
projected:
sources:
- configMap:
name: {{ .Release.Name }}-yugaware-app-config
items:
- key: application.docker.conf
path: application.docker.conf
{{- if .Values.yugaware.universe_boot_script }}
- configMap:
name: {{ .Release.Name }}-universe-boot-script
items:
- key: universe_boot_script
path: universe-boot-script.sh
{{- end }}
- name: prometheus-config
configMap:
name: {{ .Release.Name }}-yugaware-prometheus-config
items:
- key: prometheus.yml
path: prometheus.yml
{{- if .Values.securityContext.enabled }}
- name: init-container-script
configMap:
name: {{ .Release.Name }}-yugaware-init
items:
- key: init-permissions.sh
path: init-permissions.sh
{{- end }}
{{- if .Values.tls.enabled }}
- name: {{ .Release.Name }}-yugaware-tls-pem
secret:
secretName: {{ .Release.Name }}-yugaware-tls-pem
items:
- key: server.pem
path: server.pem
{{- end }}
{{- if .Values.prometheus.remoteWrite.tls.enabled }}
- name: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
secret:
secretName: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
{{- end }}
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
- name: pg-upgrade-11-to-14
configMap:
name: {{ .Release.Name }}-yugaware-pg-upgrade
items:
- key: pg-upgrade-11-to-14.sh
path: pg-upgrade-11-to-14.sh
{{- end }}
- name: pg-init
configMap:
name: {{ .Release.Name }}-yugaware-pg-prerun
items:
- key: pg-prerun.sh
path: pg-prerun.sh
{{- if .Values.postgres.extraVolumes -}}
{{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}}
{{- .Values.postgres.extraVolumes | toYaml | nindent 8 -}}
{{ end }}
{{- with .Values.dnsConfig }}
dnsConfig: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.dnsPolicy }}
dnsPolicy: {{ . | quote }}
{{- end }}
initContainers:
- image: {{ include "full_yugaware_image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.initContainers.prometheusConfiguration.resources }}
resources: {{- toYaml .Values.initContainers.prometheusConfiguration.resources | nindent 12 }}
{{ end -}}
name: prometheus-configuration
{{- if .Values.securityContext.enabled }}
command:
- 'bash'
- '-c'
- |
cp /default_prometheus_config/prometheus.yml /prometheus_configs/prometheus.yml && /bin/bash /init-container/init-permissions.sh;
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
runAsGroup: {{ .Values.securityContext.runAsGroup }}
runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
{{- else }}
command: ["cp", "/default_prometheus_config/prometheus.yml", "/prometheus_configs/prometheus.yml"]
{{- end }}
volumeMounts:
- name: prometheus-config
mountPath: /default_prometheus_config
- name: yugaware-storage
mountPath: /prometheus_configs
subPath: prometheus.yml
{{- if .Values.securityContext.enabled }}
- name: yugaware-storage
mountPath: /opt/yugabyte/yugaware/data/
subPath: data
- name: init-container-script
mountPath: /init-container
{{- end }}
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
- image: {{ include "full_image" (dict "containerName" "postgres-upgrade" "root" .) }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: postgres-upgrade
{{- if .Values.initContainers.postgresUpgrade.resources }}
resources: {{- toYaml .Values.initContainers.postgresUpgrade.resources | nindent 12 }}
{{ end -}}
command:
- 'bash'
- '-c'
- /bin/bash /pg_upgrade_11_to_14/pg-upgrade-11-to-14.sh;
env:
- name: PGDATANEW
value: /var/lib/postgresql/14/pgdata
- name: PGDATAOLD
value: /var/lib/postgresql/11/pgdata
# https://github.com/tianon/docker-postgres-upgrade/issues/10#issuecomment-523020113
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_user
- name: POSTGRES_INITDB_ARGS
value: "-U $PGUSER"
volumeMounts:
- name: yugaware-storage
mountPath: /var/lib/postgresql/11/
subPath: postgres_data
- name: yugaware-storage
mountPath: /var/lib/postgresql/14/
subPath: postgres_data_14
- name: pg-upgrade-11-to-14
mountPath: /pg_upgrade_11_to_14
- name: yugaware-storage
mountPath: /pg_upgrade_logs
subPath: postgres_data_14
{{- end }}
{{- if .Values.securityContext.enabled }}
- image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }}
name: postgres-init
{{- if .Values.initContainers.postgresInit.resources }}
resources: {{- toYaml .Values.initContainers.postgresInit.resources | nindent 12 }}
{{ end -}}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/bin/bash", "/pg_prerun/pg-prerun.sh"]
env:
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
- name: PG_UID
value: {{ .Values.securityContext.runAsUser | quote }}
- name: PG_GID
value: {{ .Values.securityContext.runAsGroup | quote }}
volumeMounts:
- name: yugaware-storage
mountPath: /var/lib/postgresql/data
subPath: postgres_data_14
- name: pg-init
mountPath: /pg_prerun
{{- end }}
containers:
{{ if not .Values.postgres.external.host }}
- name: postgres
image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
- "run-postgresql"
{{- end }}
- "-c"
- "huge_pages=off"
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ required "runAsUser cannot be empty" .Values.securityContext.runAsUser }}
runAsGroup: {{ .Values.securityContext.runAsGroup | default 0 }}
runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
{{- end }}
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_user
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_password
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_db
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
# Hardcoded the POSTGRESQL_USER because it's mandatory env var in RH PG image
# It doesn't have access to create the DB, so YBA fails to create the perf_advisor DB.
# Need to use admin user of RH PG image (postgres)
# Changing the user name won't be possible moving forward for OpenShift certified chart
- name: POSTGRESQL_USER
value: pg-yba
# valueFrom:
# secretKeyRef:
# name: {{ .Release.Name }}-yugaware-global-config
# key: postgres_user
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_password
- name: POSTGRESQL_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_password
- name: POSTGRESQL_DATABASE
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_db
{{- else }}
# The RH Postgres image doesn't allow this directory to be changed.
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
{{- end }}
ports:
- containerPort: 5432
name: postgres
{{- if .Values.postgres.resources }}
resources:
{{ toYaml .Values.postgres.resources | indent 12 }}
{{ end }}
volumeMounts:
- name: yugaware-storage
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
mountPath: /var/lib/pgsql/data
subPath: postgres_data_13
{{- else }}
mountPath: /var/lib/postgresql/data
subPath: postgres_data_14
{{- end }}
{{- if .Values.postgres.extraVolumeMounts -}}
{{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}}
{{- .Values.postgres.extraVolumeMounts | toYaml | nindent 12 -}}
{{- end -}}
{{ end }}
- name: prometheus
image: {{ include "full_image" (dict "containerName" "prometheus" "root" .) }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
runAsGroup: {{ .Values.securityContext.runAsGroup }}
runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
{{- else if (not .Values.ocpCompatibility.enabled) }}
securityContext:
runAsUser: 0
{{- end }}
{{- if .Values.prometheus.resources }}
resources:
{{ toYaml .Values.prometheus.resources | indent 12 }}
{{ end }}
volumeMounts:
- name: yugaware-storage
mountPath: /prometheus_configs
subPath: prometheus.yml
- name: yugaware-storage
mountPath: /prometheus/
- mountPath: /opt/yugabyte/yugaware/data/keys/
name: yugaware-storage
subPath: data/keys
{{- if .Values.prometheus.scrapeNodes }}
- name: yugaware-storage
mountPath: /opt/yugabyte/prometheus/targets
subPath: swamper_targets
{{- end }}
{{- if .Values.prometheus.evaluateAlertRules }}
- name: yugaware-storage
mountPath: /opt/yugabyte/prometheus/rules
subPath: swamper_rules
{{- end }}
{{- if .Values.prometheus.remoteWrite.tls.enabled }}
- name: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
mountPath: /opt/remote_write/certs/
readOnly: true
{{- end }}
args:
- --config.file=/prometheus_configs/prometheus.yml
- --storage.tsdb.path=/prometheus/
- --web.enable-admin-api
- --web.enable-lifecycle
- --storage.tsdb.retention.time={{ .Values.prometheus.retentionTime }}
- --query.max-concurrency={{ .Values.prometheus.queryConcurrency }}
- --query.max-samples={{ .Values.prometheus.queryMaxSamples }}
- --query.timeout={{ .Values.prometheus.queryTimeout }}
ports:
- containerPort: 9090
- name: yugaware
image: {{ include "full_yugaware_image" . }}
{{- if .Values.securityContext.enabled }}
securityContext:
runAsUser: {{ .Values.securityContext.runAsUser }}
runAsGroup: {{ .Values.securityContext.runAsGroup }}
runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.yugaware.resources }}
resources:
{{ toYaml .Values.yugaware.resources | indent 12 }}
{{- end }}
args: ["bin/yugaware","-Dconfig.file=/data/application.docker.conf"]
env:
# Conditionally set these env variables, if runAsUser is not 0(root)
# or 10001(yugabyte).
{{- if eq (include "checkNssWrapperExportRequired" .) "true" }}
- name: NSS_WRAPPER_GROUP
value: "/tmp/group.template"
- name: NSS_WRAPPER_PASSWD
value: "/tmp/passwd.template"
- name: LD_PRELOAD
value: "/usr/lib64/libnss_wrapper.so"
{{- end }}
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_user
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_password
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: postgres_db
- name: APP_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-yugaware-global-config
key: app_secret
{{- with .Values.yugaware.extraEnv }}{{ toYaml . | nindent 12 }}{{ end }}
ports:
- containerPort: 9000
name: yugaware
volumeMounts:
- name: yugaware-config
mountPath: /data
- name: yugaware-storage
mountPath: /opt/yugabyte/yugaware/data/
subPath: data
# old path for backward compatibility
- name: yugaware-storage
mountPath: /opt/yugaware_data/
subPath: data
- name: yugaware-storage
mountPath: /opt/yugabyte/releases/
subPath: releases
- name: yugaware-storage
mountPath: /opt/yugabyte/ybc/releases/
subPath: ybc_releases
# old path for backward compatibility
- name: yugaware-storage
mountPath: /opt/releases/
subPath: releases
- name: yugaware-storage
mountPath: /opt/yugabyte/prometheus/targets
subPath: swamper_targets
- name: yugaware-storage
mountPath: /opt/yugabyte/prometheus/rules
subPath: swamper_rules
- name: yugaware-storage
mountPath: /prometheus_configs
subPath: prometheus.yml
{{- if .Values.tls.enabled }}
- name: {{ .Release.Name }}-yugaware-tls-pem
mountPath: /opt/certs/
readOnly: true
{{- end }}
{{ if .Values.sidecars }}
{{ toYaml .Values.sidecars | indent 8 }}
{{ end }}

View File

@ -0,0 +1,37 @@
# Helm test hook: probes the YugabyteDB Anywhere UI service once the chart
# is installed ("helm test <release>"). Uses HTTPS (with -k, since the cert
# may be self-signed) when tls.enabled, plain HTTP otherwise.
apiVersion: v1
kind: Pod
metadata:
  name: {{ .Release.Name }}-yugaware-test
  labels:
    app: {{ .Release.Name }}-yugaware-test
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
  annotations:
    "helm.sh/hook": test
spec:
  imagePullSecrets:
  - name: {{ .Values.image.pullSecret }}
  containers:
  - name: yugaware-test
    image: {{ include "full_yugaware_image" . }}
    # NOTE: the sleep and the curl probe must be ONE script argument.
    # `bash -ec` executes only its first non-option argument; any further
    # list items become positional parameters ($0, $1, ...) and are never
    # run — previously the curl check was a separate element and the test
    # always "passed" after merely sleeping.
    command:
    - '/bin/bash'
    - '-ec'
    {{- if .Values.tls.enabled }}
    - >
      sleep 60s;
      curl --head -k https://{{ .Release.Name }}-yugaware-ui
    {{- else }}
    - >
      sleep 60s;
      curl --head http://{{ .Release.Name }}-yugaware-ui
    {{- end }}
    # Hard coded resources to the test pod.
    resources:
      limits:
        cpu: "1"
        memory: "512Mi"
      requests:
        cpu: "0.5"
        memory: "256Mi"
  restartPolicy: Never

View File

@ -0,0 +1,21 @@
# Copyright (c) YugaByte, Inc.
# ConfigMap exposing an optional boot script supplied via the
# yugaware.universe_boot_script chart value. Rendered only when that value
# is non-empty.
# NOTE(review): presumably consumed during universe node provisioning —
# confirm against the YBA code that reads this ConfigMap.
{{- if .Values.yugaware.universe_boot_script }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-universe-boot-script
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
data:
  # If the value itself starts with a shebang ("#!") it is used as the
  # inline script body; otherwise it is treated as a chart-relative file
  # path whose contents are loaded with .Files.Get. Both variants pass
  # through tpl, so the script may contain template expressions.
  universe_boot_script: |
{{- if hasPrefix "#!" .Values.yugaware.universe_boot_script }}
{{ tpl .Values.yugaware.universe_boot_script . | indent 4 }}
{{- else }}
{{ tpl (.Files.Get .Values.yugaware.universe_boot_script) . | indent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,23 @@
---
# PersistentVolumeClaim providing the single shared volume for the yugaware
# pod: application data, embedded Postgres data, Prometheus storage and the
# Prometheus config all live on this one claim under distinct subPaths
# (see the volumeMounts in the StatefulSet template).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Release.Name }}-yugaware-storage
  # Optional user-supplied PVC annotations (yugaware.storageAnnotations),
  # e.g. to steer a specific provisioner or backup tooling.
  {{- if .Values.yugaware.storageAnnotations }}
  annotations:
{{ toYaml .Values.yugaware.storageAnnotations | indent 4 }}
  {{- end }}
  labels:
    app: {{ template "yugaware.name" . }}
    chart: {{ template "yugaware.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
spec:
  accessModes:
    - ReadWriteOnce
  # Use the named StorageClass when set; otherwise fall back to the
  # cluster default.
  {{- if .Values.yugaware.storageClass }}
  storageClassName: {{ .Values.yugaware.storageClass }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.yugaware.storage }}

View File

@ -0,0 +1,40 @@
# helm-unittest suite: asserts that every container and initContainer
# rendered by statefulset.yaml declares resource *requests* (defaults for
# all six are provided in values.yaml, so none of these paths should come
# out empty). JSONPath filters select containers by name.
suite: Resources verification
templates:
  - statefulset.yaml
  - configs.yaml
tests:
  - it: YBA container
    template: statefulset.yaml
    asserts:
      - isNotEmpty:
          path: spec.template.spec.containers[?(@.name == "yugaware")].resources.requests
  - it: Postgres container
    template: statefulset.yaml
    asserts:
      - isNotEmpty:
          path: spec.template.spec.containers[?(@.name == "postgres")].resources.requests
  - it: Prometheus container
    template: statefulset.yaml
    asserts:
      - isNotEmpty:
          path: spec.template.spec.containers[?(@.name == "prometheus")].resources.requests
  - it: Postgres-init initContainer
    template: statefulset.yaml
    asserts:
      - isNotEmpty:
          path: spec.template.spec.initContainers[?(@.name == "postgres-init")].resources.requests
  - it: Prometheus-configuration initContainer
    template: statefulset.yaml
    asserts:
      - isNotEmpty:
          path: spec.template.spec.initContainers[?(@.name == "prometheus-configuration")].resources.requests
  - it: Postgres-upgrade initContainer
    template: statefulset.yaml
    asserts:
      - isNotEmpty:
          path: spec.template.spec.initContainers[?(@.name == "postgres-upgrade")].resources.requests

View File

@ -0,0 +1,328 @@
# Default values for yugaware.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
fullnameOverride: ""
nameOverride: ""
image:
commonRegistry: ""
# Setting commonRegistry to say, quay.io overrides the registry settings for all images
# including the yugaware image
repository: quay.io/yugabyte/yugaware
tag: 2.18.9.0-b17
pullPolicy: IfNotPresent
pullSecret: yugabyte-k8s-pull-secret
## Docker config JSON File name
## If set, this file content will be used to automatically create secret named as above
pullSecretFile: ""
postgres:
registry: ""
tag: '14.9'
name: postgres
postgres-upgrade:
registry: ""
tag: "11-to-14"
name: tianon/postgres-upgrade
prometheus:
registry: ""
tag: v2.47.1
name: prom/prometheus
yugaware:
replicas: 1
storage: 100Gi
storageClass: ""
storageAnnotations: {}
multiTenant: false
## Name of existing ServiceAccount. When provided, the chart won't create a ServiceAccount.
## It will attach the required RBAC roles to it.
## Helpful in Yugabyte Platform GKE App.
serviceAccount: ''
serviceMonitor:
enabled: false
annotations: {}
serviceAccountAnnotations: {}
service:
annotations: {}
clusterIP: ""
enabled: true
ip: ""
type: "LoadBalancer"
## whitelist source CIDRs
#loadBalancerSourceRanges:
#- 0.0.0.0/0
#- 192.168.100.0/24
pod:
annotations: {}
labels: {}
health:
username: ""
password: ""
email: ""
resources:
requests:
cpu: "2"
memory: 4Gi
enableProxyMetricsAuth: true
## List of additional allowed CORS origins in case of complex rev-proxy
additionAllowedCorsOrigins: []
proxyEndpointTimeoutMs: 3 minute
## Enables features specific for cloud deployments
cloud:
enabled: false
requestIdHeader: "X-REQUEST-ID"
podDisruptionBudget:
# See https://kubernetes.io/docs/tasks/run-application/configure-pdb/
# Note that the default of 0 doesn't really make sense since a StatefulSet isn't allowed to schedule extra replicas. However it is maintained as the default while we do additional testing. This value will likely change in the future.
maxUnavailable: 0
universe_boot_script: ""
extraEnv: []
# In case client wants to enable the additional headers to the YBA's http response
# Previously, it was possible via nginx, but given that we no longer have it, we can
# expose the same as application config/runtime config.
# Example: ["X-Content-Type-Options: nosniff", "Keep-Alive: timeout=5, max=1000"]
custom_headers: []
## Configure PostgreSQL part of the application
postgres:
# DO NOT CHANGE if using OCP Certified helm chart
user: postgres
dbname: yugaware
service:
## Expose internal Postgres as a Service
enabled: false
## Additional Service annotations
annotations: {}
## Service type
type: "ClusterIP"
## IP address for the LoadBalancer, works only if supported by the cloud provider
ip: ""
resources:
requests:
cpu: "0.5"
memory: 1Gi
# If external.host is set then we will connect to an external postgres database server instead of starting our own.
external:
host: ""
port: 5432
pass: ""
dbname: postgres
user: postgres
## JDBC connection parameters including the leading `?`.
jdbcParams: ""
## Extra volumes
## extraVolumesMounts are mandatory for each extraVolumes.
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core
## Example:
# extraVolumes:
# - name: custom-nfs-vol
# persistentVolumeClaim:
# claimName: some-nfs-claim
extraVolumes: []
## Extra volume mounts
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core
## Example:
# extraVolumeMounts:
# - name: custom-nfs-vol
# mountPath: /home/yugabyte/nfs-backup
extraVolumeMounts: []
tls:
enabled: false
hostname: "localhost"
## Expects base 64 encoded values for certificate and key.
certificate: ""
key: ""
sslProtocols: "" # if set, override default Nginx SSL protocols setting
## cert-manager values
## If cert-manager is enabled:
## If genSelfsigned: true:
## Create a self-signed issuer/clusterIssuer
## Generate a rootCA using the above issuer.
## Generate a tls certificate with secret name as: {{ .Release.Name }}-yugaware-tls-cert
## Else if genSelfsigned: false:
## Expect a clusterIssuer/issuer to be provided by user
## Generate a tls cert based on above issuer with secret name as: {{ .Release.Name }}-yugaware-tls-cert
certManager:
enabled: false
genSelfsigned: true
useClusterIssuer: false
clusterIssuer: cluster-ca
issuer: yugaware-ca
## Configuration for the TLS certificate requested from Issuer/ClusterIssuer
configuration:
duration: 8760h # 1 year (365d)
renewBefore: 240h # 10d
algorithm: RSA # ECDSA or RSA
# Can be 2048, 4096 or 8192 for RSA
# Or 256, 384 or 521 for ECDSA
keySize: 2048
## yugaware pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
enabled: true
## fsGroup related values are set at the pod level.
fsGroup: 10001
fsGroupChangePolicy: "OnRootMismatch"
## Expected to have runAsUser values != 0 when
## runAsNonRoot is set to true, otherwise container creation fails.
runAsUser: 10001
runAsGroup: 10001
runAsNonRoot: true
helm:
timeout: 900
packagePath: "/opt/yugabyte/helm"
domainName: "cluster.local"
helm2Legacy: false
ip_version_support: "v4_only" # v4_only, v6_only are the only supported values at the moment
rbac:
## Set this to false if you don't have enough permissions to create
## ClusterRole and Binding, for example an OpenShift cluster. When
## set to false, some of the graphs from Container section of the
## Metrics UI don't work.
create: true
## In order to deploy on OpenShift Container Platform, set this to
## true.
ocpCompatibility:
enabled: false
# Extra containers to add to the pod.
sidecars: []
## Following two controls for placement of pod - nodeSelector and AZ affinity.
## Note: Remember to also provide a yugaware.StorageClass that has a volumeBindingMode of
## WaitForFirstConsumer so that the PVC is created in the right topology visible to this pod.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
## eg.
## nodeSelector:
## topology.kubernetes.io/region: us-west1
nodeSelector: {}
## Affinity to a particular zone for the pod.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## eg.
## nodeAffinity:
## requiredDuringSchedulingIgnoredDuringExecution:
## nodeSelectorTerms:
## - matchExpressions:
## - key: failure-domain.beta.kubernetes.io/zone
## operator: In
## values:
## - us-west1-a
## - us-west1-b
zoneAffinity: {}
## The tolerations that the pod should have.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
## @param dnsPolicy DNS Policy for pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsPolicy: ClusterFirst
dnsPolicy: ""
## @param dnsConfig DNS Configuration pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsConfig:
## options:
## - name: ndots
## value: "4"
dnsConfig: {}
## Don't want prometheus to scrape nodes and evaluate alert rules in some cases (for example - cloud).
prometheus:
## Setting this to false will disable scraping of TServer and Master
## nodes (could be pods or VMs)
scrapeNodes: true
evaluateAlertRules: true
retentionTime: 15d
queryConcurrency: 20
queryMaxSamples: 5000000
queryTimeout: 30s
## Set this to false to disable scraping of Kubernetes worker
## nodes. Setting this to false will result in blank graphs of
## resource utilization for Kubernetes universes. Useful for
## scenarios where only VM based universes are being created.
scrapeKubernetesNodes: true
resources:
requests:
cpu: "2"
memory: 4Gi
## Prometheus remote write config, as described here:
## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
## If tls configuration is needed, set prometheus.remoteWrite.tls.enabled = true and provide
## necessary certificates/keys in base64 format as prometheus.remoteWrite.tls.[caCert|cert|key].
## Remote write config should expect certs/keys in
## /opt/remote_write/certs/[ca.crt|client.crt|client.key] respectively.
remoteWrite:
config: []
tls:
enabled: false
## base64 encoded certificates and key expected
caCert: ""
clientCert: ""
clientKey: ""
# Arbitrary key=value config entries for application.docker.conf
additionalAppConf:
stringConf: {}
nonStringConf: {}
jdbcParams: ""
## Override the APIVersion used by policy group for
## PodDisruptionBudget resources. The chart selects the correct
## APIVersion based on the target Kubernetes cluster. You don't need
## to modify this unless you are using helm template command i.e. GKE
## app's deployer image against a Kubernetes cluster >= 1.21.
# pdbPolicyVersionOverride: "v1beta1"
pdbPolicyVersionOverride: ""
initContainers:
prometheusConfiguration:
resources:
## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container
## Use the above link to learn more about Kubernetes resources configuration.
requests:
cpu: "0.25"
memory: 500Mi
postgresUpgrade:
resources:
requests:
cpu: "0.5"
memory: 500Mi
postgresInit:
resources:
requests:
cpu: "0.25"
memory: 500Mi

View File

@ -38459,6 +38459,32 @@ entries:
- assets/hashicorp/vault-0.25.0.tgz
version: 0.25.0
yugabyte:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: YugabyteDB
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: yugabyte
charts.openshift.io/name: yugabyte
apiVersion: v2
appVersion: 2.18.9.0-b17
created: "2024-08-16T00:47:51.025357908Z"
description: YugabyteDB is the high-performance distributed SQL database for building
global, internet-scale apps.
digest: 6349349280d86367608073a89c936ff08c1107b898d7b4b41323af8b02517084
home: https://www.yugabyte.com
icon: file://assets/icons/yugabyte.jpg
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
name: Sanketh Indarapu
- email: gjalla@yugabyte.com
name: Govardhan Reddy Jalla
name: yugabyte
sources:
- https://github.com/yugabyte/yugabyte-db
urls:
- assets/yugabyte/yugabyte-2.18.9.tgz
version: 2.18.9
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: YugabyteDB
@ -39018,6 +39044,32 @@ entries:
- assets/yugabyte/yugabyte-2.14.11.tgz
version: 2.14.11
yugaware:
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: YugabyteDB Anywhere
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: yugaware
charts.openshift.io/name: yugaware
apiVersion: v2
appVersion: 2.18.9.0-b17
created: "2024-08-16T00:47:51.053149883Z"
description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring
for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB
cluster with multiple pods provided by Kubernetes or OpenShift and logically
grouped together to form one logical distributed database.
digest: 553bf3c394fdb67a4b9169f89d59d9ec367f8ce4020d72a5bc43be69e5084bb3
home: https://www.yugabyte.com
icon: file://assets/icons/yugaware.jpg
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
name: Sanketh Indarapu
- email: gjalla@yugabyte.com
name: Govardhan Reddy Jalla
name: yugaware
urls:
- assets/yugabyte/yugaware-2.18.9.tgz
version: 2.18.9
- annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: YugabyteDB Anywhere
@ -39588,4 +39640,4 @@ entries:
urls:
- assets/netfoundry/ziti-host-1.5.1.tgz
version: 1.5.1
generated: "2024-08-15T00:45:45.600342711Z"
generated: "2024-08-16T00:47:46.508643026Z"