Remove `TrackVersions` from `yugabyte/yugaware` `upstream.yaml` (#1080)
parent 13adfcc53f
commit a398712ba1
Binary file not shown.
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -0,0 +1,22 @@
annotations:
  catalog.cattle.io/certified: partner
  catalog.cattle.io/display-name: YugabyteDB Anywhere
  catalog.cattle.io/kube-version: '>=1.18-0'
  catalog.cattle.io/release-name: yugaware
  charts.openshift.io/name: yugaware
apiVersion: v2
appVersion: 2024.1.3.0-b105
description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring
  for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB cluster
  with multiple pods provided by Kubernetes or OpenShift and logically grouped together
  to form one logical distributed database.
home: https://www.yugabyte.com
icon: file://assets/icons/yugaware.jpg
kubeVersion: '>=1.18-0'
maintainers:
- email: sanketh@yugabyte.com
  name: Sanketh Indarapu
- email: gjalla@yugabyte.com
  name: Govardhan Reddy Jalla
name: yugaware
version: 2024.1.3
@@ -0,0 +1,7 @@
YugabyteDB Anywhere gives you the simplicity and support to deliver a private database-as-a-service (DBaaS) at scale. Use YugabyteDB Anywhere to deploy YugabyteDB across any cloud anywhere in the world with a few clicks, simplify day 2 operations through automation, and get the services needed to realize business outcomes with the database.

YugabyteDB Anywhere can be deployed using this Helm chart. Detailed documentation is available at:
- [Install YugabyteDB Anywhere software - Kubernetes](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes/)
- [Install YugabyteDB Anywhere software - OpenShift (Helm based)](https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/openshift/#helm-based-installation)

[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/yugabyte)](https://artifacthub.io/packages/search?repo=yugabyte)
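A minimal install with the Helm CLI typically looks like the following sketch; the repository URL, namespace, and release name are illustrative assumptions, so follow the linked documentation for the authoritative steps.

```bash
# Add the Yugabyte charts repository (URL assumed; confirm against the docs above)
helm repo add yugabytedb https://charts.yugabyte.com
helm repo update

# Install YugabyteDB Anywhere (chart name "yugaware" per Chart.yaml) into its own namespace
kubectl create namespace yb-platform
helm install yw-test yugabytedb/yugaware \
  --namespace yb-platform \
  --version 2024.1.3 \
  --wait
```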
@@ -0,0 +1,5 @@
YugabyteDB Anywhere gives you the simplicity and support to deliver a private database-as-a-service (DBaaS) at scale. Use YugabyteDB Anywhere to deploy YugabyteDB across any cloud anywhere in the world with a few clicks, simplify day 2 operations through automation, and get the services needed to realize business outcomes with the database.

YugabyteDB Anywhere can be deployed using this Helm chart. Detailed documentation is available at <https://docs.yugabyte.com/preview/yugabyte-platform/install-yugabyte-platform/install-software/kubernetes/>

[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/yugabyte)](https://artifacthub.io/packages/search?repo=yugabyte)
@@ -0,0 +1,29 @@
# OCP compatible values for yugaware

image:

  repository: quay.io/yugabyte/yugaware-ubi

  postgres:
    # For non default postgres image, set postgres.sampleConfig values accordingly
    # in values.yaml.
    registry: registry.redhat.io
    tag: 1-88.1661531722
    name: rhscl/postgresql-13-rhel7
    # Postgres postgresql.conf.sample location on container. This will be used to mount
    # configmap with custom settings.
    sampleConfLocation: /opt/rh/rh-postgresql13/root/usr/share/pgsql/postgresql.conf.sample

  prometheus:
    registry: registry.redhat.io
    tag: v4.11.0
    name: openshift4/ose-prometheus

rbac:
  create: false

ocpCompatibility:
  enabled: true

securityContext:
  enabled: false
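As a sketch of the override the comments above describe (the image coordinates, config settings, and release name below are hypothetical; match them to the Postgres image actually deployed), a non-default Postgres image and the corresponding postgres.sampleConfig values could be supplied together:

```bash
# Hypothetical values file; keys assume this chart's values.yaml layout.
cat > custom-postgres-values.yaml <<'EOF'
image:
  postgres:
    registry: registry.example.com
    name: my-org/postgresql-13
    tag: "13"
    sampleConfLocation: /usr/share/pgsql/postgresql.conf.sample
postgres:
  sampleConfig:
    max_connections: "300"
EOF

helm install yw-test yugabytedb/yugaware -f custom-postgres-values.yaml
```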
@@ -0,0 +1,267 @@
---
|
||||
questions:
|
||||
## Default images for yugaware pod
|
||||
- variable: questions.defaultYBPlatform
|
||||
default: true
|
||||
label: Default Yugabyte Platform configurations
|
||||
type: boolean
|
||||
show_subquestion_if: false
|
||||
group: "Yugabyte Platform"
|
||||
subquestions:
|
||||
- variable: image.repository
|
||||
default: "quay.io/yugabyte/yugaware"
|
||||
required: false
|
||||
type: string
|
||||
label: Yugabyte Platform image repository
|
||||
description: "Yugabyte Platform image repository"
|
||||
- variable: image.tag
|
||||
default: "2.5.1.0-b153"
|
||||
required: false
|
||||
type: string
|
||||
label: Yugabyte Platform image tag
|
||||
description: "Yugabyte Platform image tag"
|
||||
- variable: image.pullPolicy
|
||||
default: "IfNotPresent"
|
||||
required: false
|
||||
label: Yugabyte Platform image pull policy
|
||||
description: "Yugabyte Platform image pull policy"
|
||||
type: enum
|
||||
options:
|
||||
- "Always"
|
||||
- "IfNotPresent"
|
||||
- variable: image.pullSecret
|
||||
default: "yugabyte-k8s-pull-secret"
|
||||
required: false
|
||||
type: secret
|
||||
label: Yugabyte Platform image pull secret
|
||||
description: "Yugabyte Platform image pull secret"
|
||||
- variable: yugaware.storage
|
||||
default: "100Gi"
|
||||
required: false
|
||||
type: string
|
||||
label: Storage
|
||||
description: "Storage"
|
||||
- variable: yugaware.storageClass
|
||||
default: ""
|
||||
required: false
|
||||
type: storageclass
|
||||
label: Storage Class
|
||||
description: "Storage Class"
|
||||
- variable: yugaware.resources.requests.cpu
|
||||
default: "2"
|
||||
required: false
|
||||
type: string
|
||||
label: CPU request for Yugabyte Platform
|
||||
description: "CPU request for Yugabyte Platform"
|
||||
- variable: yugaware.resources.requests.memory
|
||||
default: "4Gi"
|
||||
required: false
|
||||
type: string
|
||||
label: Memory request for Yugabyte Platform
|
||||
description: "Memory request for Yugabyte Platform"
|
||||
- variable: yugaware.service.enabled
|
||||
default: true
|
||||
description: "Service used to access the Yugabyte Platform"
|
||||
label: Create service for Yugabyte Platform
|
||||
type: boolean
|
||||
show_subquestion_if: false
|
||||
group: "Platform Service"
|
||||
subquestions:
|
||||
- variable: yugaware.service.ip
|
||||
default: ""
|
||||
required: false
|
||||
type: string
|
||||
label: Yugabyte Platform Service IP
|
||||
description: "Yugabyte Platform Service IP"
|
||||
- variable: yugaware.service.type
|
||||
default: "LoadBalancer"
|
||||
required: false
|
||||
type: string
|
||||
label: Yugabyte Platform Service type
|
||||
description: "Yugabyte Platform Service type"
|
||||
- variable: tls.enabled
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
label: Enable TLS on Yugabyte Platform
|
||||
description: "Enable TLS on Yugabyte Platform"
|
||||
- variable: tls.hostname
|
||||
default: "localhost"
|
||||
required: false
|
||||
type: string
|
||||
label: Yugabyte Platform TLS hostname
|
||||
description: "Yugabyte Platform TLS hostname"
|
||||
- variable: tls.certificate
|
||||
default: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZDVENDQXZHZ0F3SUJBZ0lVTlhvN2N6T2dyUWQrU09wOWdNdE00b1Vva3hFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0piRzlqWVd4b2IzTjBNQjRYRFRJeE1EUXdOakExTXpnMU4xb1hEVE14TURRdwpOREExTXpnMU4xb3dGREVTTUJBR0ExVUVBd3dKYkc5allXeG9iM04wTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBZzhBTUlJQ0NnS0NBZ0VBMUxsSTFBLzRPOVIzSkNlN1N2MUxYVXhDSmxoTWpIWUoxV1FNVmcvai82RHkKazRTTmY0MkFLQjI0dFJFK2lEWTBNaTJrRWhJcVZ4TFdPN0hkWHVSN0tYNGxSZWFVVkRFTUtYUWNQUC9QWDZkbwpwZVZTUFpSVjVHNHNxTElXUFFkTVdIam9IQWx1aml5dGJsSVJUUWdLU3QrMmpuREFDN0dxRURMREdhNXRUWEM2CktRWkNtOERlaklOUTMzaGU2TDN0Q2hBRnhJM1pwY21sR0twbzdKVXJSUG14Mk9zTHFRcTB5dEVVK0lGZGppWHEKaHJLeFR0NUhHM3M3ZUNWaTRXdlZPelVGUitJbWRlQzBRZTBXeG5iZlZUMnJkVitQL1FaVXhWSEVtWnBPc0k2LwpmczhlK1dsMlduWXY1TTg5MWkxZER3Zi9lMDdiN20xQVRKdDRtTGRldzBtd1V4UGFGT2pDMDh6cU94NmF0cGhLClU1eHNWQmhGNVhyME9DeTQyMzN0MU5URXdWUEFDOFcwQmhHdldTRXBQTXNTKzM1b2lueEFrcFQzL01ibFpjNisKcXhSYUh6MHJhSksvVGIzelVKVWxWZFkxbGl5MVYyVjNxWEU2NWlsOUFHZ2pIaHhBNFBwSktCbzZ0WVRUT3pnTworL25mc0toMk95aE8zUWxBZ0JFUHlYUm5wL0xGSTVuQ2gzdjNiOXlabFNrSk05NkVoWEJ1bHhWUWN3L2p3N2NxCkRLSlBEeHFUQy9rWUs1V0FVZGhkWG1KQkRNMFBLcngzUGVOYjRsYnQzSTFIZW1QRDBoZktiWFd6alhiVTJQdWQKdjZmT0dXTDRLSFpaem9KZ1ljMFovRXRUMEpCR09GM09mMW42N2c5dDRlUnAzbEVSL09NM0FPY1dRbWFvOHlVQwpBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUI4R0ExVWRJd1FZCk1CYUFGTU00SjA4WG8wUGY1cTlOSWZiMGYyRzZqc1FoTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dJQkFBRmxrWVJkdzA0Zm9vT29BelUyaU5ORGV1aiszemhIeFQ5eU9iSkdwREZIRitoZQpuY1ZRWGZpMitHNjBWY0xuZERsWFhmbDZLOSs4ME55aEg4QjR1UEJNTWhoWG01MjJmYnJac1dFcnR3WE1rM2prClZ5UVA3MGk2NHE1ZGVrZzhoYzI0SXhFUlVsam9XM2lDTTdrb0VxaG15VkpGeDNxMVdobFEwdzNkWVpMQVNRclYKU0RpL2JGWjlqOXVtWVdoc0Y4QjFPSThPVjNlL0YyakU1UCtoTlJJazAzbW9zWE1Rdy9iZ3ZzV0hvSkZ5blB4UApHNGUzUjBob2NnbzI0Q2xOQ21YMWFBUms5c1pyN2h0NlVsM1F1d0dMdzZkK2I5emxrUW56TzFXQzc5ekVNU1R0ClRRRzFNT2ZlL2dTVkR3dThTSnpBOHV1Z0pYTktWWkxCZlpaNW41Tk9sOHdpOVVLa1BVUW4wOHo3VWNYVDR5ZnQKZHdrbnZnWDRvMFloUnNQNHpPWDF6eWxObzhqRDhRNlV1SkdQSksrN1JnUm8zVERPV3k4MEZpUzBxRmxrSFdMKwptT0pUWGxzaEpwdHE5b1c1eGx6N1lxTnFwZFVnRmNyTjJLQWNmaGVlNnV3SUFnOFJteTQvRlhRZjhKdXluSG5oClFhVlFnTEpEeHByZTZVNk5EdWg1Y1VsMUZTcWNCUGFPY0x0Q0ViVWg5ckQxajBIdkRnTUUvTTU2TGp1UGdGZlEKMS9xeXlDUkFjc2NCSnVMYjRxcXRUb25tZVZ3T1BBbzBsNXBjcC9JcjRTcTdwM0NML0kwT0o1SEhjcmY3d3JWSgpQVWgzdU1LbWVHVDRyeDdrWlQzQzBXenhUU0loc0lZOU12MVRtelF4MEprQm93c2NYaUYrcXkvUkl5UVgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
|
||||
required: false
|
||||
type: string
|
||||
label: Yugabyte Platform TLS Certificate
|
||||
description: "Yugabyte Platform TLS Certificate (base64 encoded)"
|
||||
- variable: tls.key
|
||||
default: "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFV1VWpVRC9nNzFIY2sKSjd0Sy9VdGRURUltV0V5TWRnblZaQXhXRCtQL29QS1RoSTEvallBb0hiaTFFVDZJTmpReUxhUVNFaXBYRXRZNwpzZDFlNUhzcGZpVkY1cFJVTVF3cGRCdzgvODlmcDJpbDVWSTlsRlhrYml5b3NoWTlCMHhZZU9nY0NXNk9MSzF1ClVoRk5DQXBLMzdhT2NNQUxzYW9RTXNNWnJtMU5jTG9wQmtLYndONk1nMURmZUY3b3ZlMEtFQVhFamRtbHlhVVkKcW1qc2xTdEUrYkhZNnd1cENyVEswUlQ0Z1YyT0plcUdzckZPM2tjYmV6dDRKV0xoYTlVN05RVkg0aVoxNExSQgo3UmJHZHQ5VlBhdDFYNC85QmxURlVjU1ptazZ3anI5K3p4NzVhWFphZGkva3p6M1dMVjBQQi85N1R0dnViVUJNCm0zaVl0MTdEU2JCVEU5b1U2TUxUek9vN0hwcTJtRXBUbkd4VUdFWGxldlE0TExqYmZlM1UxTVRCVThBTHhiUUcKRWE5WklTazh5eEw3Zm1pS2ZFQ1NsUGY4eHVWbHpyNnJGRm9mUFN0b2tyOU52Zk5RbFNWVjFqV1dMTFZYWlhlcApjVHJtS1gwQWFDTWVIRURnK2trb0dqcTFoTk03T0E3NytkK3dxSFk3S0U3ZENVQ0FFUS9KZEdlbjhzVWptY0tICmUvZHYzSm1WS1FrejNvU0ZjRzZYRlZCekQrUER0eW9Nb2s4UEdwTUwrUmdybFlCUjJGMWVZa0VNelE4cXZIYzkKNDF2aVZ1M2NqVWQ2WThQU0Y4cHRkYk9OZHRUWSs1Mi9wODRaWXZnb2Rsbk9nbUJoelJuOFMxUFFrRVk0WGM1LwpXZnJ1RDIzaDVHbmVVUkg4NHpjQTV4WkNacWp6SlFJREFRQUJBb0lDQUFmY2lScDlOSmxSY3MyOVFpaTFUN0cwCi9jVFpBb3MyV1lxdlZkMWdYUGEzaGY5NXFKa01LNjVQMnVHbUwzOXRNV1NoVnl6cnl2REkyMjM5VnNjSS9wdzcKOHppd0dzODV1TTlYWVN2SDhHd0NqZFdEc2hSZ2hRUWFKa0JkeElDZzRtdHFuSGxjeDk4dE80T1dPTmwxOEp0dgp4UmxpaFZacFRIV295cGtLWHpPN2RNWExXMjdTSStkaGV2Mm5QeXF1eWpIVEFjT1AwbmxVQ0d2dThFMjkvWWxoCkNQZVJTQzhKSEVGYWxNSFNWaGpJd2ZBVWJvVVJwZU1ZSE15RjVTK2JncGZiajhSbVVUR09DbHRkWGJnYjhJai8KN0hROEFlQkIrYVFKTDVEVnFRN1JWN1ppQlMwR2ZyODlHdXdEMUs4em9mcktPdURkdXpjR2hwZk9MeGpGdmhTOApSQ2Y1Z3BFMzg0aWlHc2tWZC9mZDJLK3NhSmk0L09HbHo0aHhhc1hDcTN1TXB5OTZPNFRrMXZzM3BXdWZNVmJXCnR2d1Mrcjhvbk9uOXZqa3lqOU11eUpId1BpSlNGMUt0ZzhPUU5WMlVST0xXcHlYMWk4Z2xoMXdSelRTQ2diQnMKZ3ZxWkFvaU1pWFh3SlVXN3Zpb0RLZjI0TnZvcjViaVNzeUh0MHVKUVZJaW1iK1prTFJwTWdwRlkyTlcrTnd6LwoxOW9DS2ZUVVpWNkJia09IK0NoOUowLy9hTTRGNnUvMTI4V0UxalJQU05mdWQ0b0dpdGVPNXRsRDNWSXRsb1hlCjNyWVMrcTNuYXU1RStWc2FRZGFVNzhrSnpXYmUrWURmQ1JwWGd6TkloSkMyQ1k5d0RSK3hIaVFwbzdLSHV6dngKUkpuRjhIcGwzdWhIdWxEam44dEpBb0lCQVFEeGxhVVIwN1l6TGF2OVZtamZCenpZMjcwOU9tWnhpa3NtRnlhWApKTkJMQVB3SGdXOEVCUHdKOEprSDhXR1NTekp1OXZGd1JDVEVqZ1J5dWUvS05DWnNmUWF2UDg3dzhablJHaEhjCklHUUV1MFN3bmJzZXFJK1VWa0M5amZjaFE4dlowM0dQTGZ6bWpsSW9PNkNLTVM3TlV2Ynk5MksvOHRVVWRtWWgKMmJJa2N4V0J1RDJoenh3K1ZId3ArWktMQ0FPZi9sOG8vQ20xQ1dZSFNGdVYzTkl3T016Z2FKaExJODJNR08zQwpuODZTMXcweGc2MHB5dUV6L0hXZS9JMFZkRGNsWlgyNC9jalVBb01kQlkvSGY4Tkh2ZUNhZExQeXI3eGpRY2NLClAzN0RhdFRyK2RTZ2RoVkxzUDRRRzVVZEZxNUlMSHoxTXBkb2xXZ2pDSlZqcTZMekFvSUJBUURoYXNYdVRzMDIKNEkvYkRlSGRZSmw2Q1NzVUh2NmJXL3dpYlRhd2dpbDh5RUNWS2x6eFY4eENwWnoxWVhRQlY1YnVvQlArbjZCWApnVHgzTTJHc2R5UU1xdGRCWG9qdGp1czB6ekFNQVQzOWNmdWlHMGR0YXF3eWJMVlEwYThDZnFmMDVyUmZ0ekVmCmtTUDk2d01kVUEyTGdCbnU4akwzOU41UkxtK2RpZUdxeDAwYmJTa3l5UE9HNHIvcDl6KzN6TmVmeUhmbm94bTkKUnQza1RpeGhVNkd4UGhOSnZpWEUrWUpwT0dKVXMvK2dUWWpjUE1zRW9ONHIyR215cUs3S21NZExFa3Y1SHliWgprbmNsV2FMVFlhNEpjMjJUaWZJd01NTWMwaCtBMkJVckdjZFZ6MTA0UXluUFZQZDdXcEszenhqcjRPUHh1YnQ2CjZvTWk2REdRSVNlSEFvSUJBUURTK1YyVHFQRDMxczNaU3VvQXc2Qld2ZWVRbmZ5eThSUFpxdVFQb0oycXNxeG0KblpsbXlEZVhNcDloK1dHOVVhQTBtY0dWeWx6VnJqU2lRRkR4cEFOZVFQMWlkSFh6b3ZveVN2TUg2dDJONkVELwpnRy9XUVZ4S0xkMFI3UFhCL2lQN0VaV2RkWXJqaWF5ajZCYTJPR2RuOWlrbFcvZklLM2Y4QzczN2w5TGoxQUVYCkxOL2QvREh0R1BqcDYwTVgyYUxZeVZzdlBxL3BvdENRVVpkeDA4dFhRM05nRXRmVTN1cDFpNXV2bU1IZEtLTWoKOTV0MDRQRTA1aWVOOVgzOEcyYkJhTldYaFVJcUxCdDJiOUgxWmxVU3hQWnR6TGNObkgwSHJYejJMU2MxMzRrYwpueXdhQ2FWbFdhYzJSL0E3Mi8vTmxkUjJpWDBDWDEvM0lGcmVGUmtUQW9JQkFBbGt0S2pRbWRhZWx3QU8zUW1uCm05MnRBaUdOaFJpZVJheDlscGpXWTdveWNoYUZOR2hPTzFIUHF2SEN4TjNGYzZHd0JBVkpTNW81NVhZbUt2elAKM2kyMDlORmhpaDAwSm5NRjZ6K2swWnQ5STNwRzNyd
2RoTjE1RURrMDg3RUw3QjNWZTFDOXhvdEZOaFcvdEZxRgpXbnNrdEcvem9kSVpYeVpNNUJQUmloamV3MFRRVUxZd0Q0M2daeFR0MjdiaUQxNDJNV0R5dUFEZU1pTHdhd01IClJDYXBxbzRaSVdQSzdmZEtoVFo0WmIrZFc0V3A5dC9UZ0U2ZGJ4SWwyMXJQOFFZYzFoT2tpNjduWHBXczNZOG4KYytRcTdqY0d1WlB1aEVMd01xWGcyMGozZ3duOVlTb1dDbWo4Wm0rNmY0Q3ZYWjkrdUtEN0YyZncyOVFaanU4dApvb01DZ2dFQkFPbVVHZ1VoT0tUVys1eEpkZlFKRUVXUncyVFF6Z2l6dSt3aVkzaDYrYXNTejRNY0srVGx6bWxVCmFHT013dFhTUzc0RXIxVmlCVXMrZnJKekFPR21IV0ExZWdtaGVlY1BvaE9ybTh5WkVueVJOSkRhWC9UUXBSUnEKaVdoWENBbjJTWFQxcFlsYVBzMjdkbXpFWnQ3UlVUSkJZZ1hHZXQ4dXFjUXZaVDJZK3N6cHFNV3UzaEpWdmIxdgpZNGRJWE12RG1aV1BPVjFwbHJEaTVoc214VW05TDVtWk1IblllNzFOYkhsaEIxK0VUNXZmWFZjOERzU1RRZWRRCitDRHJKNGQ0em85dFNCa2pwYTM5M2RDRjhCSURESUQyWkVJNCtBVW52NWhTNm82NitOLzBONlp3cXkwc2pKY0cKQ21LeS9tNUpqVzFJWDMxSmZ1UU5Ldm9YNkRFN0Zkaz0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo="
|
||||
required: false
|
||||
type: string
|
||||
label: Yugabyte Platform TLS key
|
||||
description: "Yugabyte Platform TLS key (based64 encoded)"
|
||||
## Postgres configurations
|
||||
- variable: questions.defaultPostgres
|
||||
default: true
|
||||
description: "Use default postgres configurations"
|
||||
label: Use default postgres configurations
|
||||
type: boolean
|
||||
show_subquestion_if: false
|
||||
group: "Postgres"
|
||||
subquestions:
|
||||
- variable: image.postgres.registry
|
||||
default: ""
|
||||
required: false
|
||||
type: string
|
||||
label: Postgres image registry
|
||||
description: "Postgres image registry"
|
||||
- variable: image.postgres.tag
|
||||
default: "11.5"
|
||||
required: false
|
||||
type: string
|
||||
label: Postgres image tag
|
||||
description: "Postgres image tag"
|
||||
- variable: image.postgres.name
|
||||
default: "postgres"
|
||||
required: false
|
||||
type: string
|
||||
label: Postgres image name
|
||||
description: "Postgres image name"
|
||||
- variable: postgres.service.enabled
|
||||
default: false
|
||||
required: false
|
||||
type: boolean
|
||||
label: Expose internal Postgres as a Service
|
||||
description: "Expose internal Postgres as a Service"
|
||||
- variable: postgres.resources.requests.cpu
|
||||
default: "0.5"
|
||||
required: false
|
||||
type: string
|
||||
label: CPU request for Postgres
|
||||
description: "CPU request for Postgres"
|
||||
- variable: postgres.resources.requests.memory
|
||||
default: "1Gi"
|
||||
required: false
|
||||
type: string
|
||||
label: Memory request for Postgres
|
||||
description: "Memory request for Postgres"
|
||||
- variable: postgres.external.host
|
||||
default: ""
|
||||
required: false
|
||||
type: string
|
||||
label: External host for Postgres
|
||||
description: "External host for Postgres"
|
||||
- variable: postgres.external.port
|
||||
default: 5432
|
||||
required: false
|
||||
type: int
|
||||
label: External host port for Postgres
|
||||
description: "External host port for Postgres"
|
||||
- variable: postgres.external.pass
|
||||
default: ""
|
||||
required: false
|
||||
type: string
|
||||
label: External host password for Postgres
|
||||
description: "External host password for Postgres"
|
||||
- variable: postgres.external.dbname
|
||||
default: "postgres"
|
||||
required: false
|
||||
type: string
|
||||
label: External host Db name for Postgres
|
||||
description: "External host Db name for Postgres"
|
||||
- variable: postgres.external.user
|
||||
default: "postgres"
|
||||
required: false
|
||||
type: string
|
||||
label: External host Db user for Postgres
|
||||
description: "External host Db user for Postgres"
|
||||
- variable: postgres.external.jdbcParams
|
||||
default: ""
|
||||
required: false
|
||||
type: string
|
||||
label: JDBC connection parameters
|
||||
description: "JDBC connection parameters including the leading `?"
|
||||
- variable: questions.defaultPrometheus
|
||||
default: true
|
||||
description: "Default Prometheus configurations"
|
||||
label: Default Prometheus configurations
|
||||
type: boolean
|
||||
show_subquestion_if: false
|
||||
group: "Prometheus"
|
||||
subquestions:
|
||||
- variable: image.prometheus.registry
|
||||
default: ""
|
||||
required: false
|
||||
type: string
|
||||
label: Prometheus image registry
|
||||
description: "Prometheus image registry"
|
||||
- variable: image.prometheus.tag
|
||||
default: "v2.27.1"
|
||||
required: false
|
||||
type: string
|
||||
label: Prometheus image tag
|
||||
description: "Prometheus image tag"
|
||||
- variable: image.prometheus.name
|
||||
default: "prom/prometheus"
|
||||
required: false
|
||||
type: string
|
||||
label: Prometheus image name
|
||||
description: "Prometheus image name"
|
||||
- variable: prometheus.resources.requests.cpu
|
||||
default: "2"
|
||||
required: false
|
||||
type: string
|
||||
label: CPU request for Prometheus
|
||||
description: "CPU request for Prometheus"
|
||||
- variable: prometheus.resources.requests.memory
|
||||
default: "4Gi"
|
||||
required: false
|
||||
type: string
|
||||
label: Memory request for Prometheus
|
||||
- variable: prometheus.retentionTime
|
||||
default: 15d
|
||||
required: false
|
||||
type: string
|
||||
label: Retention Time
|
||||
description: "Retention Time"
|
||||
- variable: securityContext.enabled
|
||||
default: false
|
||||
description: "Enable Security Context"
|
||||
label: Enable Security Context
|
||||
type: boolean
|
||||
show_subquestion_if: true
|
||||
group: "Security Context"
|
||||
subquestions:
|
||||
- variable: securityContext.fsGroup
|
||||
default: 10001
|
||||
required: false
|
||||
type: int
|
||||
label: fsGroup
|
||||
description: "fsGroup"
|
||||
- variable: securityContext.fsGroupChangePolicy
|
||||
default: "OnRootMismatch"
|
||||
required: false
|
||||
type: string
|
||||
label: fsGroupChangePolicy
|
||||
description: "fsGroupChangePolicy"
|
||||
- variable: securityContext.runAsUser
|
||||
default: 10001
|
||||
required: false
|
||||
type: int
|
||||
label: runAsUser
|
||||
description: "runAsUser"
|
||||
- variable: securityContext.runAsGroup
|
||||
default: 10001
|
||||
required: false
|
||||
type: int
|
||||
label: runAsGroup
|
||||
description: "runAsGroup"
|
||||
- variable: securityContext.runAsNonRoot
|
||||
default: true
|
||||
required: false
|
||||
type: boolean
|
||||
label: runAsNonRoot
|
||||
description: "runAsNonRoot"
@@ -0,0 +1,14 @@
{{/*
The usage of helm upgrade [RELEASE] [CHART] --reuse-values --set [variable]:[value] throws an
error in the event that new entries are inserted to the values chart.

This is because reuse-values flag uses the values from the last release. If --set (/--set-file/
--set-string/--values/-f) is applied with the reuse-values flag, the values from the last
release are overridden for those variables alone, and newer changes to the chart are
unacknowledged.

https://medium.com/@kcatstack/understand-helm-upgrade-flags-reset-values-reuse-values-6e58ac8f127e

To prevent errors while applying upgrade with --reuse-values and --set flags after introducing
new variables, default values can be specified in this file.
*/}}
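For context, this is the upgrade pattern the comment describes, as an illustrative sketch (the release and repository names are assumptions, not values from this chart):

```bash
# Illustrative only -- not a command taken from this chart's docs.
# With --reuse-values, Helm reuses the values recorded for the previous release,
# so any value key added to the chart after that release resolves to nothing
# unless a default is provided (which is what this file is for).
helm upgrade yw-test yugabytedb/yugaware \
  --reuse-values \
  --set image.tag=2024.1.3.0-b105
```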
@@ -0,0 +1,281 @@
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "yugaware.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "yugaware.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "yugaware.chart" -}}
|
||||
{{- printf "%s" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Implements customization for the registry for component images.
|
||||
|
||||
The preference is to use the image.commonRegistry field first if it is set.
|
||||
Otherwise the local registry override for each image is used if set, for ex: image.postgres.registry
|
||||
|
||||
In both cases, the image name and tag can be customized by using the overrides for each image, for ex: image.postgres.name
|
||||
*/}}
|
||||
{{- define "full_image" -}}
|
||||
{{- $specific_registry := (get (get .root.Values.image .containerName) "registry") -}}
|
||||
{{- if not (empty .root.Values.image.commonRegistry) -}}
|
||||
{{- $specific_registry = .root.Values.image.commonRegistry -}}
|
||||
{{- end -}}
|
||||
{{- if not (empty $specific_registry) -}}
|
||||
{{- $specific_registry = printf "%s/" $specific_registry -}}
|
||||
{{- end -}}
|
||||
{{- $specific_name := (toString (get (get .root.Values.image .containerName) "name")) -}}
|
||||
{{- $specific_tag := (toString (get (get .root.Values.image .containerName) "tag")) -}}
|
||||
{{- printf "%s%s:%s" $specific_registry $specific_name $specific_tag -}}
|
||||
{{- end -}}
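A usage sketch of the precedence described above (the registry host and release name are assumptions): image.commonRegistry, when set, wins for every component image, while the per-image registry fields only apply when it is left empty.

```bash
# Route every component image (yugaware, postgres, prometheus, ...) through one
# private registry; this takes precedence over image.postgres.registry etc.
helm install yw-test yugabytedb/yugaware \
  --set image.commonRegistry=registry.example.com

# Override only the Postgres image registry; other images keep their defaults.
# This only takes effect while image.commonRegistry stays unset.
helm install yw-test yugabytedb/yugaware \
  --set image.postgres.registry=registry.example.com
```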
|
||||
|
||||
{{/*
|
||||
Implements customization for the registry for the yugaware docker image.
|
||||
|
||||
The preference is to use the image.commonRegistry field first if it is set.
|
||||
Otherwise the image.repository field is used.
|
||||
|
||||
In both cases, image.tag can be used to customize the tag of the yugaware image.
|
||||
*/}}
|
||||
{{- define "full_yugaware_image" -}}
|
||||
{{- $specific_registry := .Values.image.repository -}}
|
||||
{{- if not (empty .Values.image.commonRegistry) -}}
|
||||
{{- $specific_registry = printf "%s/%s" .Values.image.commonRegistry "yugabyte/yugaware" -}}
|
||||
{{- end -}}
|
||||
{{- $specific_tag := (toString .Values.image.tag) -}}
|
||||
{{- printf "%s:%s" $specific_registry $specific_tag -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get or generate PG password
|
||||
Source - https://github.com/helm/charts/issues/5167#issuecomment-843962731
|
||||
*/}}
|
||||
{{- define "getOrGeneratePassword" }}
|
||||
{{- $len := (default 8 .Length) | int -}}
|
||||
{{- $obj := (lookup "v1" .Kind .Namespace .Name).data -}}
|
||||
{{- if $obj }}
|
||||
{{- index $obj .Key -}}
|
||||
{{- else if (eq (lower .Kind) "secret") -}}
|
||||
{{- randAlphaNum $len | b64enc -}}
|
||||
{{- else -}}
|
||||
{{- randAlphaNum $len -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Similar to getOrGeneratePassword but written for migration from
|
||||
ConfigMap to Secret. Secret is given precedence, and then the upgrade
|
||||
case of ConfigMap to Secret is handled.
|
||||
TODO: remove this after few releases i.e. once all old platform
|
||||
installations are upgraded, and use getOrGeneratePassword.
|
||||
*/}}
|
||||
{{- define "getOrGeneratePasswordConfigMapToSecret" }}
|
||||
{{- $len := (default 8 .Length) | int -}}
|
||||
{{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}}
|
||||
{{- if $obj }}
|
||||
{{- index $obj .Key -}}
|
||||
{{- else -}}
|
||||
{{- $obj := (lookup "v1" "ConfigMap" .Namespace .Name).data -}}
|
||||
{{- if $obj }}
|
||||
{{- index $obj .Key | b64enc -}}
|
||||
{{- else -}}
|
||||
{{- randAlphaNum $len | b64enc -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Make list of allowed CORS origins
|
||||
*/}}
|
||||
{{- define "allowedCorsOrigins" -}}
|
||||
[
|
||||
{{- range .Values.yugaware.additionAllowedCorsOrigins -}}
|
||||
{{- . | quote }},
|
||||
{{- end -}}
|
||||
{{- if .Values.tls.enabled -}}
|
||||
"https://{{ .Values.tls.hostname }}"
|
||||
{{- else -}}
|
||||
"http://{{ .Values.tls.hostname }}"
|
||||
{{- end -}}
|
||||
]
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get or generate server cert and key
|
||||
*/}}
|
||||
{{- define "getOrCreateServerCert" -}}
|
||||
{{- $root := .Root -}}
|
||||
{{- if and $root.Values.tls.certificate $root.Values.tls.key -}}
|
||||
server.key: {{ $root.Values.tls.key }}
|
||||
server.crt: {{ $root.Values.tls.certificate }}
|
||||
{{- if $root.Values.tls.ca_certificate -}}
|
||||
ca.crt: {{ $root.Values.tls.ca_certificate }}
|
||||
{{- end -}}
|
||||
{{- else -}}
|
||||
{{- $result := (lookup "v1" "Secret" .Namespace .Name).data -}}
|
||||
{{- if and $result (index $result "server.pem") (index $result "ca.pem") -}}
|
||||
server.key: {{ index $result "server.key" }}
|
||||
server.crt: {{ index $result "server.crt" }}
|
||||
ca.crt: {{ index $result "ca.crt" }}
|
||||
{{- else -}}
|
||||
{{- $caCert := genCA $root.Values.tls.hostname 3650 -}}
|
||||
{{- $cert := genSignedCert $root.Values.tls.hostname nil nil 3650 $caCert -}}
|
||||
server.key: {{ $cert.Key | b64enc }}
|
||||
server.crt: {{ $cert.Cert | b64enc }}
|
||||
ca.crt: {{ $caCert.Cert | b64enc }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get or generate server key cert in pem format
|
||||
*/}}
|
||||
{{- define "getOrCreateServerPem" -}}
|
||||
{{- $root := .Root -}}
|
||||
{{- if and $root.Values.tls.certificate $root.Values.tls.key -}}
|
||||
{{- $decodedKey := $root.Values.tls.key | b64dec -}}
|
||||
{{- $decodedCert := $root.Values.tls.certificate | b64dec -}}
|
||||
{{- $serverPemContentTemp := ( printf "%s\n%s" $decodedKey $decodedCert ) -}}
|
||||
{{- $serverPemContent := $serverPemContentTemp | b64enc -}}
|
||||
{{- if $root.Values.tls.ca_certificate -}}
|
||||
{{- $caPemContent := $root.Values.tls.ca_certificate -}}
|
||||
ca.pem: {{ $caPemContent }}
|
||||
{{- end}}
|
||||
server.pem: {{ $serverPemContent }}
|
||||
{{- else -}}
|
||||
{{- $result := (lookup "v1" "Secret" .Namespace .Name).data -}}
|
||||
{{- if and $result (index $result "server.pem") (index $result "ca.pem") -}}
|
||||
{{- $serverPemContent := ( index $result "server.pem" ) -}}
|
||||
{{- $caPemContent := ( index $result "ca.pem" ) -}}
|
||||
ca.pem: {{ $caPemContent }}
|
||||
server.pem: {{ $serverPemContent }}
|
||||
{{- else -}}
|
||||
{{- $caCert := genCA $root.Values.tls.hostname 3650 -}}
|
||||
{{- $cert := genSignedCert $root.Values.tls.hostname nil nil 3650 $caCert -}}
|
||||
{{- $serverPemContentTemp := ( printf "%s\n%s" $cert.Key $cert.Cert ) -}}
|
||||
{{- $serverPemContent := $serverPemContentTemp | b64enc -}}
|
||||
{{- $caPemContentTemp := ( printf "%s" $caCert.Cert ) -}}
|
||||
{{- $caPemContent := $caPemContentTemp | b64enc -}}
|
||||
server.pem: {{ $serverPemContent }}
|
||||
ca.pem: {{ $caPemContent }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Check export of nss_wrapper environment variables required
|
||||
*/}}
|
||||
{{- define "checkNssWrapperExportRequired" -}}
|
||||
{{- if .Values.securityContext.enabled -}}
|
||||
{{- if and (ne (int .Values.securityContext.runAsUser) 0) (ne (int .Values.securityContext.runAsUser) 10001) -}}
|
||||
{{- printf "true" -}}
|
||||
{{- end -}}
|
||||
{{- else -}}
|
||||
{{- printf "false" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{/*
|
||||
Verify the extraVolumes and extraVolumeMounts mappings.
|
||||
Every extraVolumes should have extraVolumeMounts
|
||||
*/}}
|
||||
{{- define "yugaware.isExtraVolumesMappingExists" -}}
|
||||
{{- $lenExtraVolumes := len .extraVolumes -}}
|
||||
{{- $lenExtraVolumeMounts := len .extraVolumeMounts -}}
|
||||
|
||||
{{- if and (eq $lenExtraVolumeMounts 0) (gt $lenExtraVolumes 0) -}}
|
||||
{{- fail "You have not provided the extraVolumeMounts for extraVolumes." -}}
|
||||
{{- else if and (eq $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
|
||||
{{- fail "You have not provided the extraVolumes for extraVolumeMounts." -}}
|
||||
{{- else if and (gt $lenExtraVolumes 0) (gt $lenExtraVolumeMounts 0) -}}
|
||||
{{- $volumeMountsList := list -}}
|
||||
{{- range .extraVolumeMounts -}}
|
||||
{{- $volumeMountsList = append $volumeMountsList .name -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- $volumesList := list -}}
|
||||
{{- range .extraVolumes -}}
|
||||
{{- $volumesList = append $volumesList .name -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- range $volumesList -}}
|
||||
{{- if not (has . $volumeMountsList) -}}
|
||||
{{- fail (printf "You have not provided the extraVolumeMounts for extraVolume %s" .) -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- range $volumeMountsList -}}
|
||||
{{- if not (has . $volumesList) -}}
|
||||
{{- fail (printf "You have not provided the extraVolumes for extraVolumeMounts %s" .) -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get Security Context.
|
||||
*/}}
|
||||
{{- define "getSecurityContext" }}
|
||||
securityContext:
|
||||
runAsUser: {{ required "runAsUser cannot be empty" .Values.securityContext.runAsUser }}
|
||||
runAsGroup: {{ .Values.securityContext.runAsGroup | default 0 }}
|
||||
runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Get TServer/Master flags to start yugabyted.
|
||||
*/}}
|
||||
{{- define "getYbdbFlags" -}}
|
||||
{{- $flagsList := "" -}}
|
||||
{{- if .flags -}}
|
||||
{{- range $key, $value := .flags -}}
|
||||
{{- if not $flagsList -}}
|
||||
{{- $flagsList = printf "%s=%v" $key $value -}}
|
||||
{{- else -}}
|
||||
{{- $flagsList = printf "%s,%s=%v" $flagsList $key $value -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- printf $flagsList -}}
|
||||
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Make list of custom http headers
|
||||
*/}}
|
||||
{{- define "customHeaders" -}}
|
||||
[
|
||||
{{- $headers := .Values.yugaware.custom_headers -}}
|
||||
{{- range $index, $element := $headers -}}
|
||||
{{- if ne $index (sub (len $headers) 1) -}}
|
||||
{{- . | quote }},
|
||||
{{- else -}}
|
||||
{{- . | quote }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
]
|
||||
{{- end -}}
@@ -0,0 +1,99 @@
# Copyright (c) YugaByte, Inc.

{{- $root := . }}
{{- $tls := $root.Values.tls }}
{{- if and $tls.enabled $tls.certManager.enabled }}
{{- if $tls.certManager.genSelfsigned }}
{{- if $tls.certManager.useClusterIssuer }}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: {{ $root.Release.Name }}-yugaware-cluster-issuer
spec:
  selfSigned: {}
{{- else }} # useClusterIssuer=false
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: {{ $root.Release.Name }}-yugaware-issuer
  namespace: {{ $root.Release.Namespace }}
spec:
  selfSigned: {}
---
{{- end }} # useClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ $root.Release.Name }}-yugaware-ui-root-ca
  namespace: {{ $root.Release.Namespace }}
spec:
  isCA: true
  commonName: Yugaware self signed CA
  secretName: {{ .Release.Name }}-yugaware-root-ca
  secretTemplate:
    labels:
      app: "{{ template "yugaware.name" . }}"
      chart: "{{ template "yugaware.chart" . }}"
      release: {{ .Release.Name | quote }}
      heritage: {{ .Release.Service | quote }}
  duration: {{ $tls.certManager.configuration.duration | quote }}
  renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }}
  privateKey:
    algorithm: {{ $tls.certManager.configuration.algorithm | quote }}
    encoding: PKCS8
    size: {{ $tls.certManager.configuration.keySize }}
    rotationPolicy: Always
  issuerRef:
    {{- if $tls.certManager.useClusterIssuer }}
    name: {{ $root.Release.Name }}-yugaware-cluster-issuer
    kind: ClusterIssuer
    {{- else }}
    name: {{ $root.Release.Name }}-yugaware-issuer
    kind: Issuer
    {{- end }}
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: {{ $root.Release.Name }}-yugaware-ca-issuer
  namespace: {{ $root.Release.Namespace }}
spec:
  ca:
    secretName: {{ .Release.Name }}-yugaware-root-ca
---
{{- end }} # genSelfsigned
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ $root.Release.Name }}-yugaware-ui-tls
  namespace: {{ $root.Release.Namespace }}
spec:
  isCA: false
  commonName: {{ $tls.hostname }}
  secretName: {{ .Release.Name }}-yugaware-tls-cert
  secretTemplate:
    labels:
      app: "{{ template "yugaware.name" . }}"
      chart: "{{ template "yugaware.chart" . }}"
      release: {{ .Release.Name | quote }}
      heritage: {{ .Release.Service | quote }}
  duration: {{ $tls.certManager.configuration.duration | quote }}
  renewBefore: {{ $tls.certManager.configuration.renewBefore | quote }}
  privateKey:
    algorithm: {{ $tls.certManager.configuration.algorithm | quote }}
    encoding: PKCS8
    size: {{ $tls.certManager.configuration.keySize }}
    rotationPolicy: Always
  issuerRef:
    name: {{ $tls.certManager.genSelfsigned | ternary (printf "%s%s" $root.Release.Name "-yugaware-ca-issuer") ($tls.certManager.useClusterIssuer | ternary $tls.certManager.clusterIssuer $tls.certManager.issuer) }}
    {{- if $tls.certManager.useClusterIssuer }}
    kind: ClusterIssuer
    {{- else }}
    kind: Issuer
    {{- end }}
---
{{- end }}
@@ -0,0 +1,619 @@
# Copyright (c) YugaByte, Inc.
|
||||
|
||||
{{- if .Values.image.pullSecretFile }}
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
.dockerconfigjson: {{ $.Files.Get .Values.image.pullSecretFile | b64enc }}
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Values.image.pullSecret }}
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
{{- end }}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-app-config
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
application.docker.conf: |
|
||||
include classpath("application.common.conf")
|
||||
play.crypto.secret=${APP_SECRET}
|
||||
play.i18n.langs = [ "en" ]
|
||||
pidfile.path = "/dev/null"
|
||||
play.logger.includeConfigProperties=true
|
||||
log.override.path = "/opt/yugabyte/yugaware/data/logs"
|
||||
|
||||
db {
|
||||
default.dbname=${POSTGRES_DB}
|
||||
{{ if and (not .Values.useYugabyteDB) .Values.postgres.external.host }}
|
||||
default.host="{{ .Values.postgres.external.host }}"
|
||||
default.port={{ .Values.postgres.external.port }}
|
||||
{{ else if eq .Values.ip_version_support "v6_only" }}
|
||||
default.host="[::1]"
|
||||
{{ else }}
|
||||
default.host="127.0.0.1"
|
||||
{{ end }}
|
||||
{{- if .Values.useYugabyteDB }}
|
||||
default.port={{ .Values.yugabytedb.config.ysqlPort | default "5433" }}
|
||||
{{- end }}
|
||||
default.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params}
|
||||
default.params="{{ .Values.jdbcParams }}"
|
||||
default.username=${POSTGRES_USER}
|
||||
default.password=${POSTGRES_PASSWORD}
|
||||
{{ if .Values.yugaware.cloud.enabled }}
|
||||
perf_advisor.driver="org.hsqldb.jdbc.JDBCDriver"
|
||||
perf_advisor.url="jdbc:hsqldb:mem:perf-advisor"
|
||||
perf_advisor.createDatabaseIfMissing=false
|
||||
perf_advisor.username="sa"
|
||||
perf_advisor.password="sa"
|
||||
perf_advisor.migration.auto=false
|
||||
perf_advisor.migration.disabled=true
|
||||
{{ else }}
|
||||
perf_advisor.url="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.perf_advisor.dbname}${db.default.params}
|
||||
perf_advisor.createDatabaseUrl="jdbc:postgresql://"${db.default.host}":"${db.default.port}"/"${db.default.dbname}${db.default.params}
|
||||
{{ end }}
|
||||
}
|
||||
|
||||
{{- if .Values.tls.enabled }}
|
||||
https.port = 9443
|
||||
play.server.https.keyStore {
|
||||
path = /opt/certs/server.pem
|
||||
type = PEM
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
yb {
|
||||
{{- if .Values.yugaware.universe_boot_script }}
|
||||
universe_boot_script = "/data/universe-boot-script.sh"
|
||||
{{- end }}
|
||||
cloud.enabled = {{ .Values.yugaware.cloud.enabled }}
|
||||
cloud.requestIdHeader = "{{ .Values.yugaware.cloud.requestIdHeader }}"
|
||||
devops.home = /opt/yugabyte/devops
|
||||
metrics.host = "{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}"
|
||||
metrics.url = "http://"${yb.metrics.host}":9090/api/v1"
|
||||
metrics.management.url = "http://"${yb.metrics.host}":9090/-"
|
||||
storage.path = /opt/yugabyte/yugaware/data
|
||||
docker.network = bridge
|
||||
seedData = false
|
||||
swamper.targetPath = /opt/yugabyte/prometheus/targets
|
||||
swamper.rulesPath = /opt/yugabyte/prometheus/rules
|
||||
security.enable_auth_for_proxy_metrics = {{ .Values.yugaware.enableProxyMetricsAuth }}
|
||||
proxy_endpoint_timeout = {{ .Values.yugaware.proxyEndpointTimeoutMs }}
|
||||
multiTenant = {{ .Values.yugaware.multiTenant }}
|
||||
releases.path = "/opt/yugabyte/releases"
|
||||
docker.release = "/opt/yugabyte/release"
|
||||
# TODO(bogdan): need this extra level for installing from local...
|
||||
thirdparty.packagePath = /opt/third-party
|
||||
helm.packagePath = "{{ .Values.helm.packagePath }}"
|
||||
helm.timeout_secs = {{ .Values.helm.timeout }}
|
||||
health.check_interval_ms = 300000
|
||||
health.status_interval_ms = 43200000
|
||||
health.default_email = "{{ .Values.yugaware.health.email }}"
|
||||
health.ses_email_username = "{{ .Values.yugaware.health.username }}"
|
||||
health.ses_email_password = "{{ .Values.yugaware.health.password }}"
|
||||
kubernetes.storageClass = "{{ .Values.yugaware.storageClass }}"
|
||||
kubernetes.yugawareImageRepository = "{{ .Values.image.repository }}"
|
||||
kubernetes.yugawareImageTag = "{{ .Values.image.tag }}"
|
||||
kubernetes.pullSecretName = "{{ .Values.image.pullSecret }}"
|
||||
kubernetes.operator.enabled = "{{ .Values.yugaware.kubernetesOperatorEnabled }}"
|
||||
kubernetes.operator.namespace = "{{ .Values.yugaware.kubernetesOperatorNamespace }}"
|
||||
kubernetes.operator.crash_yba_on_operator_failure = "{{ .Values.yugaware.kubernetesOperatorCrashOnFailure }}"
|
||||
url = "https://{{ .Values.tls.hostname }}"
|
||||
# GKE MCS takes 7 to 10 minutes to setup DNS
|
||||
wait_for_server_timeout = 15 minutes
|
||||
{{- if .Values.tls.enabled }}
|
||||
security.headers.hsts_enabled = true
|
||||
{{- end }}
|
||||
security.headers.custom_headers = {{ include "customHeaders" . }}
|
||||
{{- if eq .Values.ip_version_support "v6_only" }}
|
||||
env_proxy_selector.enabled = false
|
||||
{{- end }}
|
||||
}
|
||||
|
||||
play.filters {
|
||||
# CSRF config
|
||||
csrf {
|
||||
cookie {
|
||||
# If non null, the CSRF token will be placed in a cookie with this name
|
||||
name = "csrfCookie"
|
||||
# Whether the cookie should be set to secure
|
||||
secure = false
|
||||
# Whether the cookie should have the HTTP only flag set
|
||||
httpOnly = false
|
||||
}
|
||||
# Whether to bypass CSRF check if CORS check is satisfied
|
||||
bypassCorsTrustedOrigins = false
|
||||
header {
|
||||
# The name of the header to accept CSRF tokens from.
|
||||
name = "Csrf-Token"
|
||||
}
|
||||
}
|
||||
# CORS config
|
||||
cors {
|
||||
pathPrefixes = ["/"]
|
||||
allowedOrigins = {{ include "allowedCorsOrigins" . }}
|
||||
# Server allows cookies/credentials to be sent with cross-origin requests
|
||||
supportsCredentials=true
|
||||
allowedHttpMethods = ["GET", "POST", "PUT", "OPTIONS", "DELETE"]
|
||||
allowedHttpHeaders = ["Accept", "Origin", "Content-Type", "X-Auth-Token", "X-AUTH-YW-API-TOKEN", "{{ .Values.yugaware.cloud.requestIdHeader }}", ${play.filters.csrf.header.name}]
|
||||
}
|
||||
}
|
||||
|
||||
# string config entries from helm values additionalAppConf
|
||||
{{- range $key, $value := .Values.additionalAppConf.stringConf }}
|
||||
{{ $key }} = "{{ $value }}"
|
||||
{{- end }}
|
||||
|
||||
# boolean/int config entries from helm values additionalAppConf
|
||||
{{- range $key, $value := .Values.additionalAppConf.nonStringConf }}
|
||||
{{ $key }} = {{ $value }}
|
||||
{{- end }}
|
||||
{{- if and .Values.tls.enabled (not .Values.tls.certManager.enabled) }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-tls-pem
|
||||
labels:
|
||||
app: "{{ template "yugaware.name" . }}"
|
||||
chart: "{{ template "yugaware.chart" . }}"
|
||||
release: {{ .Release.Name | quote }}
|
||||
heritage: {{ .Release.Service | quote }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- include "getOrCreateServerPem" (dict "Namespace" .Release.Namespace "Root" . "Name" (printf "%s%s" .Release.Name "-yugaware-tls-pem")) | nindent 2 }}
|
||||
{{- end }}
|
||||
|
||||
{{- if not .Values.useYugabyteDB }}
|
||||
---
|
||||
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-pg-upgrade
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
pg-upgrade-11-to-14.sh: |
|
||||
#!/bin/bash
|
||||
set -x -o errexit
|
||||
|
||||
cd /pg_upgrade_logs/
|
||||
if [ ! "$(ls -A ${PGDATANEW})" ] && [ "$(ls -A ${PGDATAOLD})" ];
|
||||
then
|
||||
echo "Upgrading PG data from ${PGDATAOLD} to ${PGDATANEW}"
|
||||
# if fsGroup is set, we need to remove the sticky bit, and group
|
||||
# write permission from the directories
|
||||
chmod -R g-w-s "${PGDATAOLD}"
|
||||
chmod g-w-s "${PGDATAOLD}"
|
||||
docker-upgrade pg_upgrade | tee -a /pg_upgrade_logs/pg_upgrade_11_to_14.log;
|
||||
echo "host all all all scram-sha-256" >> "${PGDATANEW}/pg_hba.conf";
|
||||
fi
|
||||
{{- end }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
---
|
||||
apiVersion: "v1"
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-pg-prerun
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
pg-prerun.sh: |
|
||||
#!/bin/bash
|
||||
set -x -o errexit
|
||||
|
||||
mkdir -p $PGDATA && chown -R $PG_UID:$PG_GID $PGDATA;
|
||||
{{- end }}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-pg-sample-config
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
postgresql.conf.sample: |
|
||||
{{- range $conf_key, $conf_value := .Values.postgres.sampleConfig }}
|
||||
{{ $conf_key }} = {{ squote $conf_value }}
|
||||
{{- end }}
|
||||
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.prometheus.remoteWrite.tls.enabled }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name | quote }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
type: Opaque
|
||||
data:
|
||||
# For user-provided remote write ca cert, cert and key. Expect to be base-64 encoded.
|
||||
{{- if .Values.prometheus.remoteWrite.tls.caCert }}
|
||||
ca.crt: {{ .Values.prometheus.remoteWrite.tls.caCert }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.remoteWrite.tls.clientCert }}
|
||||
client.crt: {{ .Values.prometheus.remoteWrite.tls.clientCert }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.remoteWrite.tls.clientKey }}
|
||||
client.key: {{ .Values.prometheus.remoteWrite.tls.clientKey }}
|
||||
{{- end }}
|
||||
{{- end}}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-prometheus-config
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
prometheus.yml: |
|
||||
global:
|
||||
scrape_interval: 10s
|
||||
evaluation_interval: 10s
|
||||
rule_files:
|
||||
- '/opt/yugabyte/prometheus/rules/yugaware.ad.*.yml'
|
||||
- '/opt/yugabyte/prometheus/rules/yugaware.recording-rules.yml'
|
||||
{{- if .Values.prometheus.remoteWrite.config }}
|
||||
remote_write:
|
||||
{{ toYaml .Values.prometheus.remoteWrite.config | indent 6}}
|
||||
{{- end }}
|
||||
scrape_configs:
|
||||
{{- if .Values.ocpCompatibility.enabled }}
|
||||
- job_name: "ocp-prometheus-federated"
|
||||
scheme: https
|
||||
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
insecure_skip_verify: true
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
|
||||
honor_labels: true
|
||||
metrics_path: "/federate"
|
||||
|
||||
params:
|
||||
'match[]':
|
||||
# kubelet metrics
|
||||
- 'kubelet_volume_stats_used_bytes{persistentvolumeclaim=~"(.*)-yb-(.*)"}'
|
||||
- 'kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=~"(.*)-yb-(.*)"}'
|
||||
# kubelet cadvisor metrics
|
||||
- 'container_cpu_usage_seconds_total{pod=~"(.*)yb-(.*)"}'
|
||||
- 'container_memory_working_set_bytes{pod=~"(.*)yb-(.*)"}'
|
||||
# kube-state-metrics
|
||||
# Supports >= OCP v4.4
|
||||
# OCP v4.4 has upgraded the KSM from 1.8.0 to 1.9.5.
|
||||
# https://docs.openshift.com/container-platform/4.4/release_notes/ocp-4-4-release-notes.html#ocp-4-4-cluster-monitoring-version-updates
|
||||
# - 'kube_pod_container_resource_requests_cpu_cores{pod=~"(.*)yb-(.*)"}'
|
||||
- 'kube_pod_container_resource_requests{pod=~"(.*)yb-(.*)", unit="core"}'
|
||||
|
||||
static_configs:
|
||||
- targets:
|
||||
- "prometheus-k8s.openshift-monitoring.svc:9091"
|
||||
|
||||
metric_relabel_configs:
|
||||
# Save the name of the metric so we can group_by since we cannot by __name__ directly...
|
||||
- source_labels: ["__name__"]
|
||||
regex: "(.*)"
|
||||
target_label: "saved_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["pod"]
|
||||
regex: "(.*)"
|
||||
target_label: "pod_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["container"]
|
||||
regex: "(.*)"
|
||||
target_label: "container_name"
|
||||
replacement: "$1"
|
||||
# rename new name of the CPU metric to the old name and label
|
||||
# ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
|
||||
- source_labels: ["__name__", "unit"]
|
||||
regex: "kube_pod_container_resource_requests;core"
|
||||
target_label: "__name__"
|
||||
replacement: "kube_pod_container_resource_requests_cpu_cores"
|
||||
|
||||
{{- else }}
|
||||
{{- if .Values.prometheus.scrapeKubernetesNodes }}
|
||||
|
||||
- job_name: 'kubernetes-nodes'
|
||||
|
||||
scheme: https
|
||||
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics
|
||||
metric_relabel_configs:
|
||||
# Only keep the metrics which we care about
|
||||
- source_labels: ["__name__"]
|
||||
regex: "kubelet_volume_stats_used_bytes|kubelet_volume_stats_capacity_bytes"
|
||||
action: keep
|
||||
- source_labels: ["persistentvolumeclaim"]
|
||||
regex: "(.*)-yb-(.*)"
|
||||
action: keep
|
||||
# Save the name of the metric so we can group_by since we cannot by __name__ directly...
|
||||
- source_labels: ["__name__"]
|
||||
regex: "(.*)"
|
||||
target_label: "saved_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["pod"]
|
||||
regex: "(.*)"
|
||||
target_label: "pod_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["container"]
|
||||
regex: "(.*)"
|
||||
target_label: "container_name"
|
||||
replacement: "$1"
|
||||
|
||||
- job_name: 'kube-state-metrics'
|
||||
static_configs:
|
||||
- targets: ['kube-state-metrics.kube-system.svc.{{.Values.domainName}}:8080']
|
||||
metric_relabel_configs:
|
||||
# Only keep the metrics which we care about
|
||||
- source_labels: ["__name__", "unit"]
|
||||
regex: "kube_pod_container_resource_requests;core"
|
||||
action: keep
|
||||
# Save the name of the metric so we can group_by since we cannot by __name__ directly...
|
||||
- source_labels: ["__name__"]
|
||||
regex: "(.*)"
|
||||
target_label: "saved_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["pod"]
|
||||
regex: "(.*)"
|
||||
target_label: "pod_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["container"]
|
||||
regex: "(.*)"
|
||||
target_label: "container_name"
|
||||
replacement: "$1"
|
||||
# Keep metrics from YugabyteDB pods, discard everything else
|
||||
- source_labels: ["pod_name"]
|
||||
regex: "(.*)yb-(.*)"
|
||||
action: keep
|
||||
# rename new name of the CPU metric to the old name and label
|
||||
# ref: https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
|
||||
- source_labels: ["__name__", "unit"]
|
||||
regex: "kube_pod_container_resource_requests;core"
|
||||
target_label: "__name__"
|
||||
replacement: "kube_pod_container_resource_requests_cpu_cores"
|
||||
# Keep metrics for CPU, discard duplicate metrics
|
||||
- source_labels: ["__name__"]
|
||||
regex: "kube_pod_container_resource_requests_cpu_cores"
|
||||
action: keep
|
||||
|
||||
- job_name: 'kubernetes-cadvisor'
|
||||
|
||||
scheme: https
|
||||
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
|
||||
metric_relabel_configs:
|
||||
# Only keep the metrics which we care about
|
||||
- source_labels: ["__name__"]
|
||||
regex: "container_cpu_usage_seconds_total|container_memory_working_set_bytes"
|
||||
action: keep
|
||||
# Save the name of the metric so we can group_by since we cannot by __name__ directly...
|
||||
- source_labels: ["__name__"]
|
||||
regex: "(.*)"
|
||||
target_label: "saved_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["pod"]
|
||||
regex: "(.*)"
|
||||
target_label: "pod_name"
|
||||
replacement: "$1"
|
||||
- source_labels: ["container"]
|
||||
regex: "(.*)"
|
||||
target_label: "container_name"
|
||||
replacement: "$1"
|
||||
# Keep metrics from YugabyteDB pods, discard everything else
|
||||
- source_labels: ["pod_name"]
|
||||
regex: "(.*)yb-(.*)"
|
||||
action: keep
|
||||
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if and (not .Values.useNginxProxy) (.Values.tls.enabled) }}
|
||||
|
||||
- job_name: 'platform'
|
||||
metrics_path: "/api/v1/prometheus_metrics"
|
||||
scheme: https
|
||||
tls_config:
|
||||
insecure_skip_verify: true
|
||||
static_configs:
|
||||
- targets: [
|
||||
'{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9443'
|
||||
]
|
||||
|
||||
{{- else }}
|
||||
|
||||
- job_name: 'platform'
|
||||
metrics_path: "/api/v1/prometheus_metrics"
|
||||
static_configs:
|
||||
- targets: [
|
||||
'{{ eq .Values.ip_version_support "v6_only" | ternary "[::1]" "127.0.0.1" }}:9000'
|
||||
]
|
||||
|
||||
{{- end }}
|
||||
|
||||
|
||||
|
||||
{{- if .Values.prometheus.selfMonitor }}
|
||||
|
||||
- job_name: 'prometheus'
|
||||
metrics_path: "/metrics"
|
||||
static_configs:
|
||||
- targets: ['127.0.0.1:9090']
|
||||
{{- end }}
|
||||
|
||||
- job_name: 'node-agent'
|
||||
metrics_path: "/metrics"
|
||||
file_sd_configs:
|
||||
- files:
|
||||
- '/opt/yugabyte/prometheus/targets/node-agent.*.json'
|
||||
|
||||
- job_name: "node"
|
||||
file_sd_configs:
|
||||
- files:
|
||||
- '/opt/yugabyte/prometheus/targets/node.*.json'
|
||||
metric_relabel_configs:
|
||||
# Below relabels are required for smooth migration from node_exporter 0.13.0 to the latest
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_cpu"
|
||||
target_label: "__name__"
|
||||
replacement: "node_cpu_seconds_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_filesystem_free"
|
||||
target_label: "__name__"
|
||||
replacement: "node_filesystem_free_bytes"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_filesystem_size"
|
||||
target_label: "__name__"
|
||||
replacement: "node_filesystem_size_bytes"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_disk_reads_completed"
|
||||
target_label: "__name__"
|
||||
replacement: "node_disk_reads_completed_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_disk_writes_completed"
|
||||
target_label: "__name__"
|
||||
replacement: "node_disk_writes_completed_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_memory_MemTotal"
|
||||
target_label: "__name__"
|
||||
replacement: "node_memory_MemTotal_bytes"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_memory_Slab"
|
||||
target_label: "__name__"
|
||||
replacement: "node_memory_Slab_bytes"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_memory_Cached"
|
||||
target_label: "__name__"
|
||||
replacement: "node_memory_Cached_bytes"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_memory_Buffers"
|
||||
target_label: "__name__"
|
||||
replacement: "node_memory_Buffers_bytes"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_memory_MemFree"
|
||||
target_label: "__name__"
|
||||
replacement: "node_memory_MemFree_bytes"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_network_receive_bytes"
|
||||
target_label: "__name__"
|
||||
replacement: "node_network_receive_bytes_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_network_transmit_bytes"
|
||||
target_label: "__name__"
|
||||
replacement: "node_network_transmit_bytes_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_network_receive_packets"
|
||||
target_label: "__name__"
|
||||
replacement: "node_network_receive_packets_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_network_transmit_packets"
|
||||
target_label: "__name__"
|
||||
replacement: "node_network_transmit_packets_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_network_receive_errs"
|
||||
target_label: "__name__"
|
||||
replacement: "node_network_receive_errs_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_network_transmit_errs"
|
||||
target_label: "__name__"
|
||||
replacement: "node_network_transmit_errs_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_disk_bytes_read"
|
||||
target_label: "__name__"
|
||||
replacement: "node_disk_read_bytes_total"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "node_disk_bytes_written"
|
||||
target_label: "__name__"
|
||||
replacement: "node_disk_written_bytes_total"
|
||||
      # Save the metric name into saved_name so we can group by it, since we cannot group by __name__ directly...
|
||||
- source_labels: ["__name__"]
|
||||
regex: "(.*)"
|
||||
target_label: "saved_name"
|
||||
replacement: "$1"
|
||||
|
||||
- job_name: "yugabyte"
|
||||
tls_config:
|
||||
insecure_skip_verify: true
|
||||
metrics_path: "/prometheus-metrics"
|
||||
file_sd_configs:
|
||||
- files:
|
||||
- '/opt/yugabyte/prometheus/targets/yugabyte.*.json'
|
||||
metric_relabel_configs:
|
||||
      # Save the metric name into saved_name so we can group by it, since we cannot group by __name__ directly...
|
||||
- source_labels: ["__name__"]
|
||||
regex: "(.*)"
|
||||
target_label: "saved_name"
|
||||
replacement: "$1"
|
||||
      # The following rules retrofit the handler_latency_* metric names into label format.
|
||||
- source_labels: ["__name__"]
|
||||
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)"
|
||||
target_label: "server_type"
|
||||
replacement: "$1"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(.*)"
|
||||
target_label: "service_type"
|
||||
replacement: "$2"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?"
|
||||
target_label: "service_method"
|
||||
replacement: "$3"
|
||||
- source_labels: ["__name__"]
|
||||
regex: "handler_latency_(yb_[^_]*)_([^_]*)_([^_]*)(_sum|_count)?"
|
||||
target_label: "__name__"
|
||||
replacement: "rpc_latency$4"
|
|
@ -0,0 +1,46 @@
|
|||
{{- if .Values.yugaware.defaultUser.enabled -}}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "yugaware.fullname" . }}-customer-create-job
|
||||
namespace: "{{ .Release.Namespace }}"
|
||||
labels:
|
||||
app: "customer-create"
|
||||
release: {{ .Release.Name | quote }}
|
||||
chart: "{{ .Chart.Name }}"
|
||||
component: "{{ .Values.Component }}"
|
||||
annotations:
|
||||
"helm.sh/hook": post-install
|
||||
"helm.sh/hook-weight": "0"
|
||||
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: {{ include "yugaware.fullname" . }}-customer-create-job
|
||||
labels:
|
||||
app: "customer-create"
|
||||
release: {{ .Release.Name | quote }}
|
||||
chart: "{{ .Chart.Name }}"
|
||||
component: "{{ .Values.Component }}"
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: post-install-job
|
||||
image: {{ include "full_yugaware_image" . }}
|
||||
command:
|
||||
- "curl"
|
||||
- "-X"
|
||||
- "POST"
|
||||
- "--url"
|
||||
- "{{ .Release.Name }}-yugaware-ui/api/register"
|
||||
- "--header"
|
||||
- "Content-Type: application/json"
|
||||
- "--data"
|
||||
- '{"email": "{{ .Values.yugaware.defaultUser.email}}",
|
||||
{{- if eq .Values.yugaware.defaultUser.password ""}}
|
||||
{{- fail ".Values.yugaware.defaultUser.password cannot be empty string"}}
|
||||
{{- end }}
|
||||
"password": "{{ .Values.yugaware.defaultUser.password }}",
|
||||
"code": "operator",
|
||||
"name": "{{ .Values.yugaware.defaultUser.username }}"}'
|
||||
{{- end -}}
|
|
@ -0,0 +1,23 @@
|
|||
# Copyright (c) YugaByte, Inc.
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
{{- if .Values.postgres.external.host }}
|
||||
postgres_db: {{ .Values.postgres.external.dbname | b64enc | quote }}
|
||||
postgres_user: {{ .Values.postgres.external.user | b64enc | quote }}
|
||||
postgres_password: {{ .Values.postgres.external.pass | b64enc | quote }}
|
||||
{{- else }}
|
||||
postgres_db: {{ .Values.postgres.dbname | b64enc | quote }}
|
||||
postgres_user: {{ .Values.postgres.user | b64enc | quote }}
|
||||
postgres_password: {{ include "getOrGeneratePasswordConfigMapToSecret" (dict "Namespace" .Release.Namespace "Name" (printf "%s%s" .Release.Name "-yugaware-global-config") "Key" "postgres_password") | quote }}
|
||||
{{- end }}
|
||||
app_secret: {{ randAlphaNum 64 | b64enc | b64enc | quote }}
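  # Note: randAlphaNum is evaluated on every template render, so a fresh app_secret is
  # generated on each install/upgrade; the double b64enc leaves the decoded Secret value
  # itself base64-encoded, which is presumably the form the application expects.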
|
|
@ -0,0 +1,69 @@
|
|||
{{- if .Values.securityContext.enabled }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-init
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
init-permissions.sh: |
|
||||
#!/bin/bash
|
||||
|
||||
set -xe -o pipefail
|
||||
|
||||
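    # recreate_files re-creates a directory via copy-and-swap: the copy picks up the
    # ownership of the user running this init script, the original is kept next to it
    # as "<backup_suffix>-<timestamp>", and the copy is moved into the original path.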
recreate_files() {
|
||||
local source_directory="$1"
|
||||
local backup_suffix="$2"
|
||||
local target_directory="$(dirname "${source_directory}")/${backup_suffix}"
|
||||
local temp_directory="$(dirname "${source_directory}")/${backup_suffix}-$(date +%s)"
|
||||
|
||||
echo "Creating copy of ${source_directory} directory"
|
||||
cp -r "${source_directory}" "${target_directory}"
|
||||
|
||||
echo "Renaming existing directory"
|
||||
mv "${source_directory}" "${temp_directory}"
|
||||
|
||||
echo "Renaming target directory source directory"
|
||||
mv "${target_directory}" "${source_directory}"
|
||||
}
|
||||
|
||||
data_directory="/opt/yugabyte/yugaware/data"
|
||||
if [[ -d "${data_directory}/keys/" ]]; then
|
||||
pemfiles=$(find "${data_directory}/keys/" -name "*.pem" -exec stat -c "%a" {} + | uniq | tr '\n' ',')
|
||||
IFS="," read -r -a pemfile_perms <<< "${pemfiles}"
|
||||
|
||||
trigger=false
|
||||
echo "Finding pem files with permissions different than 400, and setting their permissions to 400."
|
||||
|
||||
for pemfile in "${pemfile_perms[@]}"; do
|
||||
if [[ "${pemfile}" != *400* ]]; then
|
||||
echo "Found a pem file with permissions ${pemfile}"
|
||||
trigger=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if ${trigger}; then
|
||||
echo "Creating copy of data/keys directory"
|
||||
cp -r "${data_directory}/keys" "${data_directory}/new_keys"
|
||||
|
||||
echo "Setting permission of all pem files to 400"
|
||||
find "${data_directory}/new_keys/" -name "*.pem" -exec chmod 400 {} +
|
||||
|
||||
echo "Renaming existing keys directory"
|
||||
mv "${data_directory}/keys" "${data_directory}/keys-$(date +%s)"
|
||||
|
||||
echo "Renaming new keys directory"
|
||||
mv "${data_directory}/new_keys" "${data_directory}/keys"
|
||||
else
|
||||
echo "All pem files already have permission set to 400"
|
||||
fi
|
||||
fi
|
||||
    # Re-create the provision directory so the provision_instance.py script gets correct permissions.
|
||||
if [[ -d "/opt/yugabyte/yugaware/data/provision/" ]]; then
|
||||
recreate_files "/opt/yugabyte/yugaware/data/provision/" "backup_provision"
|
||||
fi
|
||||
{{- end }}
|
|
@ -0,0 +1,19 @@
|
|||
{{/*
|
||||
TODO: switch to policy/v1 completely when we stop supporting
|
||||
Kubernetes versions < 1.21
|
||||
*/}}
|
||||
{{- if .Values.pdbPolicyVersionOverride }}
|
||||
apiVersion: policy/{{ .Values.pdbPolicyVersionOverride }}
|
||||
{{- else if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
|
||||
apiVersion: policy/v1
|
||||
{{- else }}
|
||||
apiVersion: policy/v1beta1
|
||||
{{- end }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-pdb
|
||||
spec:
|
||||
maxUnavailable: {{ .Values.yugaware.podDisruptionBudget.maxUnavailable | toJson }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}-yugaware
|
|
@ -0,0 +1,240 @@
|
|||
{{- if and (not .Values.rbac.create) (.Values.yugaware.kubernetesOperatorEnabled) }}
|
||||
{{- fail "Must use rbac if Kubernetes operator is enabled" }}
|
||||
{{- end }}
|
||||
{{ if not .Values.yugaware.serviceAccount }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
labels:
|
||||
k8s-app: yugaware
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
{{- if .Values.yugaware.serviceAccountAnnotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.yugaware.serviceAccountAnnotations | indent 4 }}
|
||||
{{- end }}
|
||||
{{ end }}
|
||||
{{- if .Values.rbac.create }}
|
||||
{{- if .Values.ocpCompatibility.enabled }}
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-cluster-monitoring-view
|
||||
labels:
|
||||
app: yugaware
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: cluster-monitoring-view
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- else }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
rules:
|
||||
# Set of permissions required for operator
|
||||
- apiGroups:
|
||||
- operator.yugabyte.io
|
||||
resources:
|
||||
- "*"
|
||||
verbs:
|
||||
- "get"
|
||||
- "create"
|
||||
- "delete"
|
||||
- "patch"
|
||||
- "list"
|
||||
- "watch"
|
||||
- "update"
|
||||
# Set of permissions required to install, upgrade, delete the yugabyte chart
|
||||
- apiGroups:
|
||||
- "policy"
|
||||
resources:
|
||||
- "poddisruptionbudgets"
|
||||
verbs:
|
||||
- "get"
|
||||
- "create"
|
||||
- "delete"
|
||||
- "patch"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "services"
|
||||
verbs:
|
||||
- "get"
|
||||
- "delete"
|
||||
- "create"
|
||||
- "patch"
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- "statefulsets"
|
||||
verbs:
|
||||
- "get"
|
||||
- "list"
|
||||
- "delete"
|
||||
- "create"
|
||||
- "patch"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "secrets"
|
||||
verbs:
|
||||
- "create"
|
||||
- "list"
|
||||
- "get"
|
||||
- "delete"
|
||||
- "update"
|
||||
- "patch"
|
||||
- apiGroups:
|
||||
- "cert-manager.io"
|
||||
resources:
|
||||
- "certificates"
|
||||
verbs:
|
||||
- "create"
|
||||
- "delete"
|
||||
- "get"
|
||||
- "patch"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "configmaps"
|
||||
verbs:
|
||||
- "get"
|
||||
- "create"
|
||||
- "patch"
|
||||
- "delete"
|
||||
# Set of permissions required by YBA to manage YB DB universes
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "namespaces"
|
||||
verbs:
|
||||
- "delete"
|
||||
- "create"
|
||||
- "patch"
|
||||
- "get"
|
||||
- "list"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "pods"
|
||||
verbs:
|
||||
- "get"
|
||||
- "list"
|
||||
- "delete"
|
||||
- "watch"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "services"
|
||||
verbs:
|
||||
- "get"
|
||||
- "list"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "persistentvolumeclaims"
|
||||
verbs:
|
||||
- "get"
|
||||
- "patch"
|
||||
- "list"
|
||||
- "delete"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "pods/exec"
|
||||
verbs:
|
||||
- "create"
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- "statefulsets/scale"
|
||||
verbs:
|
||||
- "patch"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "events"
|
||||
verbs:
|
||||
- "list"
|
||||
# required to scrape resource metrics like CPU, memory, etc.
|
||||
# required to validate zones during provider creation
|
||||
# required to auto-fill the zone labels for provider creation
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "nodes"
|
||||
verbs:
|
||||
- "list"
|
||||
- "get"
|
||||
- "watch"
|
||||
# required to scrape resource metrics like CPU, memory, etc.
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "nodes/proxy"
|
||||
verbs:
|
||||
- "get"
|
||||
# Ref: https://github.com/yugabyte/charts/commit/4a5319972385666487a7bc2cd0c35052f2cfa4c5
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "events"
|
||||
verbs:
|
||||
- "get"
|
||||
- "list"
|
||||
- "watch"
|
||||
- "create"
|
||||
- "update"
|
||||
- "patch"
|
||||
- "delete"
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "configmaps"
|
||||
verbs:
|
||||
- "list"
|
||||
- "watch"
|
||||
- "update"
|
||||
# required to validate storage class during provider creation and
|
||||
# volume expansion
|
||||
- apiGroups:
|
||||
- "storage.k8s.io"
|
||||
resources:
|
||||
- "storageclasses"
|
||||
verbs:
|
||||
- "get"
|
||||
# required to validate existence of issuer during provider creation.
|
||||
- apiGroups:
|
||||
- "cert-manager.io"
|
||||
resources:
|
||||
- "issuers"
|
||||
- "clusterissuers"
|
||||
verbs:
|
||||
- "get"
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
labels:
|
||||
k8s-app: yugaware
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: {{ .Release.Name }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright (c) YugaByte, Inc.
|
||||
|
||||
{{- if .Values.yugaware.service.enabled }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-ui
|
||||
{{- if .Values.yugaware.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.yugaware.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
spec:
|
||||
{{- if eq .Release.Service "Tiller" }}
|
||||
clusterIP:
|
||||
{{- else }}
|
||||
{{- if .Values.yugaware.service.clusterIP }}
|
||||
  clusterIP: {{ .Values.yugaware.service.clusterIP }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
ports:
|
||||
{{- if .Values.tls.enabled }}
|
||||
- name: ui-tls
|
||||
port: 443
|
||||
targetPort: 9443
|
||||
{{- end }}
|
||||
- name: ui
|
||||
port: 80
|
||||
targetPort: 9000
|
||||
- name: metrics
|
||||
port: 9090
|
||||
selector:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
type: "{{ .Values.yugaware.service.type }}"
|
||||
{{- if and (eq .Values.yugaware.service.type "LoadBalancer") (.Values.yugaware.service.ip) }}
|
||||
loadBalancerIP: "{{ .Values.yugaware.service.ip }}"
|
||||
{{- end }}
|
||||
{{- if .Values.yugaware.service.loadBalancerSourceRanges }}
|
||||
loadBalancerSourceRanges:
|
||||
{{- toYaml .Values.yugaware.service.loadBalancerSourceRanges | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.yugaware.serviceMonitor.enabled }}
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware
|
||||
{{- if .Values.yugaware.serviceMonitor.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.yugaware.serviceMonitor.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: ui # Scrape the Platform itself instead of bundled Prometheus
|
||||
path: api/v1/prometheus_metrics
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
{{- end }}
|
||||
{{- if and (not .Values.useYugabyteDB) .Values.postgres.service.enabled }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-postgres
|
||||
{{- if .Values.postgres.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.postgres.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
spec:
|
||||
ports:
|
||||
- name: postgres
|
||||
port: 5432
|
||||
targetPort: 5432
|
||||
selector:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
type: {{ .Values.postgres.service.type }}
|
||||
{{- if and (eq .Values.postgres.service.type "LoadBalancer") (.Values.postgres.service.ip) }}
|
||||
loadBalancerIP: "{{ .Values.postgres.service.ip }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,569 @@
|
|||
# Copyright (c) YugaByte, Inc.
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware
|
||||
labels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
spec:
|
||||
serviceName: {{ .Release.Name }}-yugaware
|
||||
replicas: {{ .Values.yugaware.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configs.yaml") . | sha256sum }}
|
||||
{{- if .Values.yugaware.pod.annotations }}
|
||||
{{ toYaml .Values.yugaware.pod.annotations | indent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
{{- if .Values.yugaware.pod.labels }}
|
||||
{{ toYaml .Values.yugaware.pod.labels | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
terminationGracePeriodSeconds: {{ .Values.yugaware.pod.terminationGracePeriodSeconds }}
|
||||
serviceAccountName: {{ .Values.yugaware.serviceAccount | default .Release.Name }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.image.pullSecret }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
securityContext:
|
||||
fsGroup: {{ .Values.securityContext.fsGroup }}
|
||||
{{- if (semverCompare ">=1.20-x" .Capabilities.KubeVersion.Version) }}
|
||||
fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.nodeSelector | indent 8}}
|
||||
{{- end }}
|
||||
{{- if .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- with .Values.tolerations }}{{ toYaml . | nindent 8 }}{{ end }}
|
||||
{{- end }}
|
||||
{{- if .Values.zoneAffinity }}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
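          # The two matchExpressions entries below are separate nodeSelectorTerms, so they are
          # OR-ed: a node carrying either the legacy failure-domain.beta.kubernetes.io/zone label
          # or the newer topology.kubernetes.io/zone label satisfies this affinity.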
- matchExpressions:
|
||||
- key: failure-domain.beta.kubernetes.io/zone
|
||||
operator: In
|
||||
values:
|
||||
{{ toYaml .Values.zoneAffinity | indent 18 }}
|
||||
- matchExpressions:
|
||||
- key: topology.kubernetes.io/zone
|
||||
operator: In
|
||||
values:
|
||||
{{ toYaml .Values.zoneAffinity | indent 18 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: yugaware-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ .Release.Name }}-yugaware-storage
|
||||
- name: yugaware-ui
|
||||
emptyDir: {}
|
||||
- name: yugaware-config
|
||||
projected:
|
||||
sources:
|
||||
- configMap:
|
||||
name: {{ .Release.Name }}-yugaware-app-config
|
||||
items:
|
||||
- key: application.docker.conf
|
||||
path: application.docker.conf
|
||||
{{- if .Values.yugaware.universe_boot_script }}
|
||||
- configMap:
|
||||
name: {{ .Release.Name }}-universe-boot-script
|
||||
items:
|
||||
- key: universe_boot_script
|
||||
path: universe-boot-script.sh
|
||||
{{- end }}
|
||||
- name: prometheus-config
|
||||
configMap:
|
||||
name: {{ .Release.Name }}-yugaware-prometheus-config
|
||||
items:
|
||||
- key: prometheus.yml
|
||||
path: prometheus.yml
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
- name: init-container-script
|
||||
configMap:
|
||||
name: {{ .Release.Name }}-yugaware-init
|
||||
items:
|
||||
- key: init-permissions.sh
|
||||
path: init-permissions.sh
|
||||
{{- end }}
|
||||
{{- if .Values.tls.enabled }}
|
||||
- name: {{ .Release.Name }}-yugaware-tls-pem
|
||||
secret:
|
||||
secretName: {{ .Release.Name }}-yugaware-tls-pem
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.remoteWrite.tls.enabled }}
|
||||
- name: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
|
||||
secret:
|
||||
secretName: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
|
||||
{{- end }}
|
||||
{{- if not .Values.useYugabyteDB }}
|
||||
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
|
||||
- name: pg-upgrade-11-to-14
|
||||
configMap:
|
||||
name: {{ .Release.Name }}-yugaware-pg-upgrade
|
||||
items:
|
||||
- key: pg-upgrade-11-to-14.sh
|
||||
path: pg-upgrade-11-to-14.sh
|
||||
{{- end }}
|
||||
- name: pg-init
|
||||
configMap:
|
||||
name: {{ .Release.Name }}-yugaware-pg-prerun
|
||||
items:
|
||||
- key: pg-prerun.sh
|
||||
path: pg-prerun.sh
|
||||
- name: pg-sample-config
|
||||
configMap:
|
||||
name: {{ .Release.Name }}-pg-sample-config
|
||||
items:
|
||||
- key: postgresql.conf.sample
|
||||
path: postgresql.conf.sample
|
||||
{{- if .Values.postgres.extraVolumes -}}
|
||||
{{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}}
|
||||
{{- .Values.postgres.extraVolumes | toYaml | nindent 8 -}}
|
||||
{{ end }}
|
||||
{{- if .Values.yugaware.extraVolumes -}}
|
||||
{{- include "yugaware.isExtraVolumesMappingExists" .Values.yugaware -}}
|
||||
{{- .Values.yugaware.extraVolumes | toYaml | nindent 8 -}}
|
||||
{{ end }}
|
||||
{{- else }}
|
||||
- name: ybdb-init
|
||||
configMap:
|
||||
name: {{ .Release.Name }}-yugaware-ybdb
|
||||
items:
|
||||
- key: ybdb-prerun.sh
|
||||
path: ybdb-prerun.sh
|
||||
- name: ybdb-create-yugaware-db
|
||||
configMap:
|
||||
name: {{ .Release.Name }}-yugaware-ybdb
|
||||
items:
|
||||
- key: ybdb-create-yugaware-db.sh
|
||||
path: ybdb-create-yugaware-db.sh
|
||||
{{- end}}
|
||||
{{- with .Values.dnsConfig }}
|
||||
dnsConfig: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.dnsPolicy }}
|
||||
dnsPolicy: {{ . | quote }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- image: {{ include "full_yugaware_image" . }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.initContainers.prometheusConfiguration.resources }}
|
||||
resources: {{- toYaml .Values.initContainers.prometheusConfiguration.resources | nindent 12 }}
|
||||
{{ end -}}
|
||||
name: prometheus-configuration
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
command:
|
||||
- 'bash'
|
||||
- '-c'
|
||||
- |
|
||||
cp /default_prometheus_config/prometheus.yml /prometheus_configs/prometheus.yml && /bin/bash /init-container/init-permissions.sh;
|
||||
{{- include "getSecurityContext" . | indent 10 }}
|
||||
{{- else }}
|
||||
command: ["cp", "/default_prometheus_config/prometheus.yml", "/prometheus_configs/prometheus.yml"]
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: prometheus-config
|
||||
mountPath: /default_prometheus_config
|
||||
- name: yugaware-storage
|
||||
mountPath: /prometheus_configs
|
||||
subPath: prometheus.yml
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/yugaware/data/
|
||||
subPath: data
|
||||
- name: init-container-script
|
||||
mountPath: /init-container
|
||||
{{- end }}
|
||||
{{- if not .Values.useYugabyteDB }}
|
||||
{{- if not (and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io")) }}
|
||||
- image: {{ include "full_image" (dict "containerName" "postgres-upgrade" "root" .) }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: postgres-upgrade
|
||||
{{- if .Values.initContainers.postgresUpgrade.resources }}
|
||||
resources: {{- toYaml .Values.initContainers.postgresUpgrade.resources | nindent 12 }}
|
||||
{{ end -}}
|
||||
command:
|
||||
- 'bash'
|
||||
- '-c'
|
||||
- /bin/bash /pg_upgrade_11_to_14/pg-upgrade-11-to-14.sh;
|
||||
env:
|
||||
- name: PGDATANEW
|
||||
value: /var/lib/postgresql/14/pgdata
|
||||
- name: PGDATAOLD
|
||||
value: /var/lib/postgresql/11/pgdata
|
||||
# https://github.com/tianon/docker-postgres-upgrade/issues/10#issuecomment-523020113
|
||||
- name: PGUSER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_user
|
||||
- name: POSTGRES_INITDB_ARGS
|
||||
value: "-U $PGUSER"
|
||||
volumeMounts:
|
||||
- name: yugaware-storage
|
||||
mountPath: /var/lib/postgresql/11/
|
||||
subPath: postgres_data
|
||||
- name: yugaware-storage
|
||||
mountPath: /var/lib/postgresql/14/
|
||||
subPath: postgres_data_14
|
||||
- name: pg-upgrade-11-to-14
|
||||
mountPath: /pg_upgrade_11_to_14
|
||||
- name: yugaware-storage
|
||||
mountPath: /pg_upgrade_logs
|
||||
subPath: postgres_data_14
|
||||
{{- end }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
- image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }}
|
||||
name: postgres-init
|
||||
{{- if .Values.initContainers.dbInit.resources }}
|
||||
resources: {{- toYaml .Values.initContainers.dbInit.resources | nindent 12 }}
|
||||
{{ end -}}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command: ["/bin/bash", "/pg_prerun/pg-prerun.sh"]
|
||||
env:
|
||||
- name: PGDATA
|
||||
value: /var/lib/postgresql/data/pgdata
|
||||
- name: PG_UID
|
||||
value: {{ .Values.securityContext.runAsUser | quote }}
|
||||
- name: PG_GID
|
||||
value: {{ .Values.securityContext.runAsGroup | quote }}
|
||||
volumeMounts:
|
||||
- name: yugaware-storage
|
||||
mountPath: /var/lib/postgresql/data
|
||||
subPath: postgres_data_14
|
||||
- name: pg-init
|
||||
mountPath: /pg_prerun
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
- image: {{ include "full_image" (dict "containerName" "ybdb" "root" .) }}
|
||||
name: ybdb-init
|
||||
{{- if .Values.initContainers.dbInit.resources }}
|
||||
resources: {{- toYaml .Values.initContainers.dbInit.resources | nindent 12 }}
|
||||
{{ end -}}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command: ["/bin/bash", "/ybdb_prerun/ybdb-prerun.sh"]
|
||||
env:
|
||||
- name: YBDB_DATA
|
||||
value: /var/lib/ybdb
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
- name: YBDB_UID
|
||||
value: {{ .Values.securityContext.runAsUser | quote }}
|
||||
- name: YBDB_GID
|
||||
value: {{ .Values.securityContext.runAsGroup | quote }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: yugaware-storage
|
||||
mountPath: /var/lib/ybdb
|
||||
subPath: ybdb_data
|
||||
- name: ybdb-init
|
||||
mountPath: /ybdb_prerun
|
||||
{{- end }}
|
||||
containers:
|
||||
{{ if not ( or .Values.useYugabyteDB .Values.postgres.external.host) }}
|
||||
- name: postgres
|
||||
image: {{ include "full_image" (dict "containerName" "postgres" "root" .) }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
{{- include "getSecurityContext" . | indent 10 }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POSTGRES_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_user
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_password
|
||||
- name: POSTGRES_DB
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_db
|
||||
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
|
||||
        # POSTGRESQL_USER is hardcoded because it is a mandatory env var in the RH PG image.
|
||||
# It doesn't have access to create the DB, so YBA fails to create the perf_advisor DB.
|
||||
# Need to use admin user of RH PG image (postgres)
|
||||
        # Changing the user name won't be possible going forward for the OpenShift certified chart
|
||||
- name: POSTGRESQL_USER
|
||||
value: pg-yba
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: {{ .Release.Name }}-yugaware-global-config
|
||||
# key: postgres_user
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_password
|
||||
- name: POSTGRESQL_ADMIN_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_password
|
||||
- name: POSTGRESQL_DATABASE
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_db
|
||||
{{- else }}
|
||||
# The RH Postgres image doesn't allow this directory to be changed.
|
||||
- name: PGDATA
|
||||
value: /var/lib/postgresql/data/pgdata
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: 5432
|
||||
name: postgres
|
||||
|
||||
{{- if .Values.postgres.resources }}
|
||||
resources:
|
||||
{{ toYaml .Values.postgres.resources | indent 12 }}
|
||||
{{ end }}
|
||||
|
||||
volumeMounts:
|
||||
- name: yugaware-storage
|
||||
{{- if and (.Values.ocpCompatibility.enabled) (eq .Values.image.postgres.registry "registry.redhat.io") }}
|
||||
mountPath: /var/lib/pgsql/data
|
||||
subPath: postgres_data_13
|
||||
{{- else }}
|
||||
mountPath: /var/lib/postgresql/data
|
||||
subPath: postgres_data_14
|
||||
{{- end }}
|
||||
- name: pg-sample-config
|
||||
mountPath: {{ .Values.image.postgres.sampleConfLocation }}
|
||||
subPath: postgresql.conf.sample
|
||||
{{- if .Values.postgres.extraVolumeMounts -}}
|
||||
{{- include "yugaware.isExtraVolumesMappingExists" .Values.postgres -}}
|
||||
{{- .Values.postgres.extraVolumeMounts | toYaml | nindent 12 -}}
|
||||
{{- end -}}
|
||||
{{ end }}
|
||||
|
||||
# Check if yugabytedb is enabled.
|
||||
{{- if .Values.useYugabyteDB }}
|
||||
- name: ybdb
|
||||
image: {{ include "full_image" (dict "containerName" "ybdb" "root" .) }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
{{- include "getSecurityContext" . | indent 10 }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POSTGRES_USER
|
||||
value: {{ .Values.yugabytedb.user }}
|
||||
- name: POSTGRES_PASSWORD
|
||||
value: ""
|
||||
- name: POSTGRES_DB
|
||||
value: {{ .Values.yugabytedb.dbname }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.yugabytedb.config.ysqlPort | default "5433" }}
|
||||
name: ybdb
|
||||
- containerPort: 15433
|
||||
name: yugabyted-ui
|
||||
|
||||
{{- if .Values.yugabytedb.resources }}
|
||||
resources:
|
||||
{{ toYaml .Values.yugabytedb.resources | nindent 12 }}
|
||||
{{- end }}
|
||||
|
||||
volumeMounts:
|
||||
- name: yugaware-storage
|
||||
mountPath: /var/lib/ybdb
|
||||
subPath: ybdb_data
|
||||
- name: ybdb-create-yugaware-db
|
||||
          mountPath: /ybdb-create-yugaware-db
|
||||
|
||||
command: ["/home/yugabyte/bin/yugabyted", "start"]
|
||||
args:
|
||||
- --base_dir=/var/lib/ybdb
|
||||
- --daemon=false
|
||||
- --advertise_address={{ .Values.yugabytedb.config.advertiseAddress | default "127.0.0.1" }}
|
||||
- --ysql_port={{ .Values.yugabytedb.config.ysqlPort | default "5433" }}
|
||||
- --tserver_webserver_port={{ .Values.yugabytedb.config.tserverWebserverPort }}
|
||||
- --ui={{ .Values.yugabytedb.service.enabled }}
|
||||
- --tserver_flags={{ include "getYbdbFlags" (dict "flags" .Values.yugabytedb.config.tserverFlags)}}
|
||||
- --master_flags={{ include "getYbdbFlags" (dict "flags" .Values.yugabytedb.config.masterFlags)}}
|
||||
# Wait for YBDB to become healthy, and create yugaware db.
|
||||
lifecycle:
|
||||
postStart:
|
||||
exec:
|
||||
command: ["/bin/bash", "/ybdb-create-yugware-db/ybdb-create-yugaware-db.sh"]
|
||||
{{- end }}
|
||||
|
||||
- name: prometheus
|
||||
image: {{ include "full_image" (dict "containerName" "prometheus" "root" .) }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
{{- include "getSecurityContext" . | nindent 10 }}
|
||||
{{- else if (not .Values.ocpCompatibility.enabled) }}
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.prometheus.resources }}
|
||||
resources:
|
||||
{{ toYaml .Values.prometheus.resources | indent 12 }}
|
||||
{{ end }}
|
||||
{{- with .Values.prometheus.extraEnv }}
|
||||
env:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
|
||||
volumeMounts:
|
||||
- name: yugaware-storage
|
||||
mountPath: /prometheus_configs
|
||||
subPath: prometheus.yml
|
||||
- name: yugaware-storage
|
||||
mountPath: /prometheus/
|
||||
- mountPath: /opt/yugabyte/yugaware/data/keys/
|
||||
name: yugaware-storage
|
||||
subPath: data/keys
|
||||
{{- if .Values.prometheus.scrapeNodes }}
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/prometheus/targets
|
||||
subPath: swamper_targets
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.evaluateAlertRules }}
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/prometheus/rules
|
||||
subPath: swamper_rules
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.remoteWrite.tls.enabled }}
|
||||
- name: {{ .Release.Name }}-yugaware-prometheus-remote-write-tls
|
||||
mountPath: /opt/remote_write/certs/
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
args:
|
||||
- --config.file=/prometheus_configs/prometheus.yml
|
||||
- --storage.tsdb.path=/prometheus/
|
||||
- --web.enable-admin-api
|
||||
- --web.enable-lifecycle
|
||||
- --storage.tsdb.retention.time={{ .Values.prometheus.retentionTime }}
|
||||
- --query.max-concurrency={{ .Values.prometheus.queryConcurrency }}
|
||||
- --query.max-samples={{ .Values.prometheus.queryMaxSamples }}
|
||||
- --query.timeout={{ .Values.prometheus.queryTimeout }}
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
- name: yugaware
|
||||
image: {{ include "full_yugaware_image" . }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
{{- include "getSecurityContext" . | nindent 10 }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
|
||||
{{- if .Values.yugaware.resources }}
|
||||
resources:
|
||||
{{ toYaml .Values.yugaware.resources | indent 12 }}
|
||||
{{- end }}
|
||||
args: ["bin/yugaware","-Dconfig.file=/data/application.docker.conf"]
|
||||
env:
|
||||
        # Conditionally set these env variables if runAsUser is not 0 (root)
|
||||
        # or 10001 (yugabyte).
|
||||
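        # (nss_wrapper provides passwd/group entries for UIDs that have no /etc/passwd entry,
        # e.g. the arbitrary UIDs assigned by OpenShift, so tools that look up the current
        # user keep working.)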
{{- if eq (include "checkNssWrapperExportRequired" .) "true" }}
|
||||
- name: NSS_WRAPPER_GROUP
|
||||
value: "/tmp/group.template"
|
||||
- name: NSS_WRAPPER_PASSWD
|
||||
value: "/tmp/passwd.template"
|
||||
- name: LD_PRELOAD
|
||||
value: "/usr/lib64/libnss_wrapper.so"
|
||||
{{- end }}
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POSTGRES_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_user
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_password
|
||||
- name: POSTGRES_DB
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: postgres_db
|
||||
- name: APP_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-yugaware-global-config
|
||||
key: app_secret
|
||||
{{- with .Values.yugaware.extraEnv }}{{ toYaml . | nindent 12 }}{{ end }}
|
||||
ports:
|
||||
- containerPort: 9000
|
||||
name: yugaware
|
||||
{{- if .Values.yugaware.pod.probes.enabled }}
|
||||
startupProbe:
|
||||
failureThreshold: {{ div .Values.yugaware.pod.probes.startupTimeSec.max (div .Values.yugaware.pod.probes.startupTimeSec.min 2) }}
|
||||
initialDelaySeconds: {{ .Values.yugaware.pod.probes.startupTimeSec.min }}
|
||||
periodSeconds: {{ div .Values.yugaware.pod.probes.startupTimeSec.min 2 }}
|
||||
timeoutSeconds: {{ div .Values.yugaware.pod.probes.startupTimeSec.min 4 }}
|
||||
httpGet:
|
||||
path: /api/app_version
|
||||
port: yugaware
|
||||
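        # Example with the chart defaults (startupTimeSec.min=20, max=600): this renders
        # initialDelaySeconds=20, periodSeconds=10, timeoutSeconds=5, failureThreshold=60,
        # i.e. up to 60 checks * 10s = 600s for /api/app_version to respond before restart.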
livenessProbe:
|
||||
failureThreshold: {{ .Values.yugaware.pod.probes.livenessProbe.failureThreshold }}
|
||||
periodSeconds: {{ .Values.yugaware.pod.probes.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ div .Values.yugaware.pod.probes.livenessProbe.periodSeconds 2 }}
|
||||
httpGet:
|
||||
path: /api/app_version
|
||||
port: yugaware
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: yugaware-config
|
||||
mountPath: /data
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/yugaware/data/
|
||||
subPath: data
|
||||
# old path for backward compatibility
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugaware_data/
|
||||
subPath: data
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/releases/
|
||||
subPath: releases
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/ybc/releases/
|
||||
subPath: ybc_releases
|
||||
# old path for backward compatibility
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/releases/
|
||||
subPath: releases
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/prometheus/targets
|
||||
subPath: swamper_targets
|
||||
- name: yugaware-storage
|
||||
mountPath: /opt/yugabyte/prometheus/rules
|
||||
subPath: swamper_rules
|
||||
- name: yugaware-storage
|
||||
mountPath: /prometheus_configs
|
||||
subPath: prometheus.yml
|
||||
{{- if .Values.tls.enabled }}
|
||||
- name: {{ .Release.Name }}-yugaware-tls-pem
|
||||
mountPath: /opt/certs/
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if .Values.yugaware.extraVolumeMounts -}}
|
||||
{{- include "yugaware.isExtraVolumesMappingExists" .Values.yugaware -}}
|
||||
{{- .Values.yugaware.extraVolumeMounts | toYaml | nindent 10 -}}
|
||||
{{- end -}}
|
||||
{{ if .Values.sidecars }}
|
||||
{{ toYaml .Values.sidecars | indent 8 }}
|
||||
{{ end }}
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-test
|
||||
labels:
|
||||
app: {{ .Release.Name }}-yugaware-test
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.image.pullSecret }}
|
||||
containers:
|
||||
- name: yugaware-test
|
||||
image: {{ include "full_yugaware_image" . }}
|
||||
command:
|
||||
- '/bin/bash'
|
||||
- '-ec'
|
||||
- >
|
||||
sleep 60s;
|
||||
{{- if .Values.tls.enabled }}
|
||||
- >
|
||||
curl --head -k https://{{ .Release.Name }}-yugaware-ui
|
||||
{{- else }}
|
||||
- >
|
||||
curl --head http://{{ .Release.Name }}-yugaware-ui
|
||||
{{- end }}
|
||||
    # Hard-coded resources for the test pod.
|
||||
resources:
|
||||
limits:
|
||||
cpu: "1"
|
||||
memory: "512Mi"
|
||||
requests:
|
||||
cpu: "0.5"
|
||||
memory: "256Mi"
|
||||
restartPolicy: Never
|
|
@ -0,0 +1,21 @@
|
|||
# Copyright (c) YugaByte, Inc.
|
||||
{{- if .Values.yugaware.universe_boot_script }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-universe-boot-script
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
universe_boot_script: |
|
||||
{{- if hasPrefix "#!" .Values.yugaware.universe_boot_script }}
|
||||
{{ tpl .Values.yugaware.universe_boot_script . | indent 4 }}
|
||||
{{- else }}
|
||||
{{ tpl (.Files.Get .Values.yugaware.universe_boot_script) . | indent 4 }}
|
||||
{{- end }}
|
||||
|
||||
{{- end }}
|
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-storage
|
||||
{{- if .Values.yugaware.storageAnnotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.yugaware.storageAnnotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
{{- if .Values.yugaware.storageClass }}
|
||||
storageClassName: {{ .Values.yugaware.storageClass }}
|
||||
{{- end }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.yugaware.storage }}
|
|
@ -0,0 +1,60 @@
|
|||
# Copyright (c) YugaByte, Inc.
|
||||
|
||||
{{- if .Values.useYugabyteDB -}}
|
||||
---
|
||||
apiVersion: "v1"
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugaware-ybdb
|
||||
labels:
|
||||
app: {{ template "yugaware.name" . }}
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
data:
|
||||
ybdb-prerun.sh: |
|
||||
#!/bin/bash
|
||||
set -x -o errexit
|
||||
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
mkdir -p $YBDB_DATA && chown -R $YBDB_UID:$YBDB_GID $YBDB_DATA;
|
||||
{{- else }}
|
||||
mkdir -p $YBDB_DATA;
|
||||
{{- end }}
|
||||
|
||||
ybdb-create-yugaware-db.sh: |
|
||||
#!/bin/bash
|
||||
set -x
|
||||
|
||||
run_ysql_command() {
|
||||
local command="$1"
|
||||
|
||||
/home/yugabyte/bin/ysqlsh \
|
||||
-U {{ .Values.yugabytedb.user }} \
|
||||
-h {{ .Values.yugabytedb.config.advertiseAddress | default "127.0.0.1" }} \
|
||||
-p {{ .Values.yugabytedb.config.ysqlPort | default "5433" }} \
|
||||
-c "$command"
|
||||
|
||||
return $?
|
||||
}
|
||||
|
||||
retry_counter={{ .Values.yugabytedb.config.startupRetryCount }}
|
||||
retry_sleep_time_sec=5
|
||||
|
||||
# Wait for ybdb to become healthy.
|
||||
while [[ $retry_counter -gt 0 ]] && ! run_ysql_command "SELECT VERSION();"; do
|
||||
sleep "$retry_sleep_time_sec"
|
||||
retry_counter=$(( retry_counter - 1 ))
|
||||
done
|
||||
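    # With retry_sleep_time_sec=5 and the default startupRetryCount of 10, the loop above
    # waits up to roughly 50 seconds for YSQL to respond before giving up.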
|
||||
if [[ $retry_counter -eq 0 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
create_db_cmd="CREATE DATABASE {{ .Values.yugabytedb.dbname }} WITH colocated = {{ .Values.yugabytedb.config.enableColocatedTables }};"
|
||||
# Create yugaware db if it doesn't exist.
|
||||
/home/yugabyte/bin/ysqlsh -U {{ .Values.yugabytedb.user }} -h {{ .Values.yugabytedb.config.advertiseAddress | default "127.0.0.1" }} \
|
||||
-p {{ .Values.yugabytedb.config.ysqlPort | default "5433" }} -tc "SELECT 1 FROM pg_database WHERE datname = '{{ .Values.yugabytedb.dbname }}'" | grep -q "1" || run_ysql_command "$create_db_cmd"
|
||||
|
||||
exit $?
|
||||
{{- end }}
|
|
@ -0,0 +1,27 @@
|
|||
{{- if and .Values.useYugabyteDB .Values.yugabytedb.service.enabled }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-yugabyted-ui
|
||||
{{- if .Values.yugabytedb.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.yugabytedb.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
chart: {{ template "yugaware.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Values.helm2Legacy | ternary "Tiller" (.Release.Service | quote) }}
|
||||
spec:
|
||||
ports:
|
||||
- name: yugabyted-ui
|
||||
port: {{ .Values.yugabytedb.service.yugabytedUiPort }}
|
||||
targetPort: 15433
|
||||
selector:
|
||||
app: {{ .Release.Name }}-yugaware
|
||||
type: {{ .Values.yugabytedb.service.type }}
|
||||
{{- if and (eq .Values.yugabytedb.service.type "LoadBalancer") (.Values.yugabytedb.service.loadBalancerIP ) }}
|
||||
loadBalancerIP: "{{ .Values.yugabytedb.service.loadBalancerIP }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -0,0 +1,40 @@
|
|||
suite: Resources verification
|
||||
templates:
|
||||
- statefulset.yaml
|
||||
- configs.yaml
|
||||
tests:
|
||||
- it: YBA container
|
||||
template: statefulset.yaml
|
||||
asserts:
|
||||
- isNotEmpty:
|
||||
path: spec.template.spec.containers[?(@.name == "yugaware")].resources.requests
|
||||
|
||||
- it: Postgres container
|
||||
template: statefulset.yaml
|
||||
asserts:
|
||||
- isNotEmpty:
|
||||
path: spec.template.spec.containers[?(@.name == "postgres")].resources.requests
|
||||
|
||||
- it: Prometheus container
|
||||
template: statefulset.yaml
|
||||
asserts:
|
||||
- isNotEmpty:
|
||||
path: spec.template.spec.containers[?(@.name == "prometheus")].resources.requests
|
||||
|
||||
- it: Postgres-init initContainer
|
||||
template: statefulset.yaml
|
||||
asserts:
|
||||
- isNotEmpty:
|
||||
path: spec.template.spec.initContainers[?(@.name == "postgres-init")].resources.requests
|
||||
|
||||
- it: Prometheus-configuration initContainer
|
||||
template: statefulset.yaml
|
||||
asserts:
|
||||
- isNotEmpty:
|
||||
path: spec.template.spec.initContainers[?(@.name == "prometheus-configuration")].resources.requests
|
||||
|
||||
- it: Postgres-upgrade initContainer
|
||||
template: statefulset.yaml
|
||||
asserts:
|
||||
- isNotEmpty:
|
||||
path: spec.template.spec.initContainers[?(@.name == "postgres-upgrade")].resources.requests
|
|
@ -0,0 +1,461 @@
|
|||
# Default values for yugaware.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
fullnameOverride: ""
|
||||
nameOverride: ""
|
||||
|
||||
# Use YugabyteDB as the backend DB for YB Anywhere (Yugaware)
|
||||
# instead of PostgreSQL.
|
||||
useYugabyteDB: false
|
||||
|
||||
image:
|
||||
commonRegistry: ""
|
||||
  # Setting commonRegistry to, say, quay.io overrides the registry settings for all images
|
||||
# including the yugaware image
|
||||
|
||||
repository: quay.io/yugabyte/yugaware
|
||||
tag: 2024.1.3.0-b105
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecret: yugabyte-k8s-pull-secret
|
||||
## Docker config JSON File name
|
||||
## If set, this file content will be used to automatically create secret named as above
|
||||
pullSecretFile: ""
|
||||
|
||||
postgres:
|
||||
    # For a non-default postgres image, set postgres.sampleConfig values accordingly.
|
||||
registry: ""
|
||||
tag: '14.12'
|
||||
name: postgres
|
||||
# Postgres postgresql.conf.sample location on container. This will be used to mount
|
||||
# configmap with custom settings.
|
||||
sampleConfLocation: /usr/share/postgresql/postgresql.conf.sample
|
||||
|
||||
postgres-upgrade:
|
||||
registry: ""
|
||||
tag: "11-to-14"
|
||||
name: tianon/postgres-upgrade
|
||||
|
||||
prometheus:
|
||||
registry: ""
|
||||
tag: v2.47.1
|
||||
name: prom/prometheus
|
||||
|
||||
ybdb:
|
||||
registry: ""
|
||||
tag: 2.18.1.0-b84
|
||||
name: yugabytedb/yugabyte
|
||||
|
||||
|
||||
yugaware:
|
||||
replicas: 1
|
||||
storage: 100Gi
|
||||
storageClass: ""
|
||||
storageAnnotations: {}
|
||||
multiTenant: false
|
||||
## Name of existing ServiceAccount. When provided, the chart won't create a ServiceAccount.
|
||||
## It will attach the required RBAC roles to it.
|
||||
## Helpful in Yugabyte Platform GKE App.
|
||||
serviceAccount: ''
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
serviceAccountAnnotations: {}
|
||||
service:
|
||||
annotations: {}
|
||||
clusterIP: ""
|
||||
enabled: true
|
||||
ip: ""
|
||||
type: "LoadBalancer"
|
||||
## whitelist source CIDRs
|
||||
#loadBalancerSourceRanges:
|
||||
#- 0.0.0.0/0
|
||||
#- 192.168.100.0/24
|
||||
pod:
|
||||
annotations: {}
|
||||
labels: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
probes: # HTTP healthcheck probes configuration
|
||||
enabled: false # add or not livenessProbe and startupProbe to the container
|
||||
startupTimeSec:
|
||||
min: 20 # minimum expected time for yugaware to start
|
||||
max: 600 # maximum expected time for yugaware to start, k8s will restart pod on timeout
|
||||
livenessProbe:
|
||||
periodSeconds: 30 # query period
|
||||
failureThreshold: 10 # number of failed checks to restart pod
|
||||
health:
|
||||
username: ""
|
||||
password: ""
|
||||
email: ""
|
||||
resources:
|
||||
requests:
|
||||
cpu: "2"
|
||||
memory: 4Gi
|
||||
enableProxyMetricsAuth: true
|
||||
  ## List of additional allowed CORS origins in case of a complex reverse-proxy setup
|
||||
additionAllowedCorsOrigins: []
|
||||
proxyEndpointTimeoutMs: 3 minute
|
||||
## Enables features specific for cloud deployments
|
||||
cloud:
|
||||
enabled: false
|
||||
requestIdHeader: "X-REQUEST-ID"
|
||||
|
||||
podDisruptionBudget:
|
||||
# See https://kubernetes.io/docs/tasks/run-application/configure-pdb/
|
||||
# Note that the default of 0 doesn't really make sense since a StatefulSet isn't allowed to schedule extra replicas. However it is maintained as the default while we do additional testing. This value will likely change in the future.
|
||||
maxUnavailable: 0
|
||||
|
||||
universe_boot_script: ""
|
||||
# Control YBA k8s operator deployments
|
||||
kubernetesOperatorEnabled: false
|
||||
# Set namespace the operator watches for custom resources.
|
||||
# If default "" the YBA k8s operator watches all namespaces.
|
||||
kubernetesOperatorNamespace: ""
|
||||
# Crash YBA if operator thread is not initialised correctly
|
||||
kubernetesOperatorCrashOnFailure: true
|
||||
|
||||
# Create an initial user for YugabyteDB Anywhere. A user is required for YBA workflows.
|
||||
defaultUser:
|
||||
enabled: false
|
||||
username: yugabyte_k8s
|
||||
# Please set the password. Templating will fail if it is not set.
|
||||
password: ""
|
||||
code: k8soperator
|
||||
email: yugabyte_k8s@yugabyte.com
|
||||
|
||||
extraEnv: []
|
||||
|
||||
## Extra volumes
|
||||
## extraVolumesMounts are mandatory for each extraVolumes.
|
||||
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core
|
||||
## Example:
|
||||
# extraVolumes:
|
||||
# - name: custom-nfs-vol
|
||||
# persistentVolumeClaim:
|
||||
# claimName: some-nfs-claim
|
||||
extraVolumes: []
|
||||
|
||||
## Extra volume mounts
|
||||
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core
|
||||
## Example:
|
||||
# extraVolumeMounts:
|
||||
# - name: custom-nfs-vol
|
||||
# mountPath: /home/yugabyte/nfs-backup
|
||||
extraVolumeMounts: []
|
||||
|
||||
# In case the client wants to add additional headers to YBA's HTTP responses
|
||||
# Previously, it was possible via nginx, but given that we no longer have it, we can
|
||||
# expose the same as application config/runtime config.
|
||||
# Example: ["X-Content-Type-Options: nosniff", "Keep-Alive: timeout=5, max=1000"]
|
||||
custom_headers: []
|
||||
|
||||
## Configure PostgreSQL part of the application
|
||||
postgres:
|
||||
# DO NOT CHANGE if using OCP Certified helm chart
|
||||
user: postgres
|
||||
dbname: yugaware
|
||||
|
||||
# DO NOT CHANGE for standard images
|
||||
# The settings below are applied to postgresql.sample.conf. This is required because
|
||||
# postgres initdb would not respect custom settings otherwise. Note: Only huge_pages = off
|
||||
  # is different from the default value (try).
|
||||
sampleConfig:
|
||||
max_connections: 100
|
||||
shared_buffers: 128MB
|
||||
dynamic_shared_memory_type: posix
|
||||
max_wal_size: 1GB
|
||||
min_wal_size: 80MB
|
||||
wal_buffers: 4MB
|
||||
logging_collector: on
|
||||
log_filename: postgresql-%a.log
|
||||
log_truncate_on_rotation: on
|
||||
log_rotation_age: 1d
|
||||
log_rotation_size: 0
|
||||
log_timezone: UTC
|
||||
datestyle: 'iso, mdy'
|
||||
timezone: UTC
|
||||
lc_messages: en_US.utf8
|
||||
lc_monetary: en_US.utf8
|
||||
lc_numeric: en_US.utf8
|
||||
lc_time: en_US.utf8
|
||||
default_text_search_config: pg_catalog.english
|
||||
listen_addresses: '*'
|
||||
huge_pages: off
|
||||
|
||||
service:
|
||||
## Expose internal Postgres as a Service
|
||||
enabled: false
|
||||
## Additional Service annotations
|
||||
annotations: {}
|
||||
## Service type
|
||||
type: "ClusterIP"
|
||||
## IP address for the LoadBalancer, works only if supported by the cloud provider
|
||||
ip: ""
|
||||
|
||||
resources:
|
||||
requests:
|
||||
cpu: "0.5"
|
||||
memory: 1Gi
|
||||
|
||||
# If external.host is set then we will connect to an external postgres database server instead of starting our own.
|
||||
external:
|
||||
host: ""
|
||||
port: 5432
|
||||
pass: ""
|
||||
dbname: postgres
|
||||
user: postgres
|
||||
|
||||
## JDBC connection parameters including the leading `?`.
|
||||
jdbcParams: ""
|
||||
|
||||
|
||||
## Extra volumes
|
||||
## extraVolumesMounts are mandatory for each extraVolumes.
|
||||
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volume-v1-core
|
||||
## Example:
|
||||
# extraVolumes:
|
||||
# - name: custom-nfs-vol
|
||||
# persistentVolumeClaim:
|
||||
# claimName: some-nfs-claim
|
||||
extraVolumes: []
|
||||
|
||||
## Extra volume mounts
|
||||
## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core
|
||||
## Example:
|
||||
# extraVolumeMounts:
|
||||
# - name: custom-nfs-vol
|
||||
# mountPath: /home/yugabyte/nfs-backup
|
||||
extraVolumeMounts: []
|
||||
|
||||
## Configure YBDB part of the application
|
||||
yugabytedb:
|
||||
user: yugabyte
|
||||
dbname: yugaware
|
||||
|
||||
config:
|
||||
advertiseAddress : "127.0.0.1"
|
||||
    # The default tserver_webserver_port is 9000, but that port is already used by yugaware.
|
||||
tserverWebserverPort: 9010
|
||||
ysqlPort: ""
|
||||
tserverFlags:
|
||||
yb_enable_read_committed_isolation: true
|
||||
masterFlags: {}
|
||||
enableColocatedTables: true
|
||||
startupRetryCount: 10
|
||||
|
||||
|
||||
service:
|
||||
## Expose Yugabyted UI as a Service
|
||||
enabled: false
|
||||
## Additional Service annotations
|
||||
annotations: {}
|
||||
## Service type
|
||||
type: "LoadBalancer"
|
||||
## IP address for the LoadBalancer, works only if supported by the cloud provider
|
||||
loadBalancerIP: ""
|
||||
yugabytedUiPort: 15433
|
||||
|
||||
resources:
|
||||
requests:
|
||||
cpu: "2"
|
||||
memory: 4Gi
|
||||
|
||||
tls:
|
||||
enabled: false
|
||||
hostname: "localhost"
|
||||
## Expects base64 encoded certificate, key, and CA certificate.
|
||||
## Populate these for non-self-signed certificates.
|
||||
## All three values should be base64 encoded.
|
||||
## These will be used to create server.pem and ca.pem files.
|
||||
## Note: The validity of the provided certificates is not verified.
|
||||
certificate: ""
|
||||
key: ""
|
||||
ca_certificate: ""
|
||||
sslProtocols: "" # if set, override default Nginx SSL protocols setting
|
||||
## cert-manager values
|
||||
## If cert-manager is enabled:
|
||||
## If genSelfsigned: true:
|
||||
## Create a self-signed issuer/clusterIssuer
|
||||
## Generate a rootCA using the above issuer.
|
||||
## Generate a tls certificate with secret name as: {{ .Release.Name }}-yugaware-tls-cert
|
||||
## Else if genSelfsigned: false:
|
||||
## Expect a clusterIssuer/issuer to be provided by user
|
||||
## Generate a tls cert based on above issuer with secret name as: {{ .Release.Name }}-yugaware-tls-cert
|
||||
certManager:
|
||||
enabled: false
|
||||
genSelfsigned: true
|
||||
useClusterIssuer: false
|
||||
clusterIssuer: cluster-ca
|
||||
issuer: yugaware-ca
|
||||
## Configuration for the TLS certificate requested from Issuer/ClusterIssuer
|
||||
configuration:
|
||||
      duration: 8760h # 365d (1 year)
|
||||
      renewBefore: 240h # 10d
|
||||
algorithm: RSA # ECDSA or RSA
|
||||
# Can be 2048, 4096 or 8192 for RSA
|
||||
# Or 256, 384 or 521 for ECDSA
|
||||
keySize: 2048
|
||||
|
||||
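  ## Example (illustrative only): delegating certificate issuance to an existing
  ## cert-manager ClusterIssuer; "my-cluster-issuer" is a placeholder name.
  # certManager:
  #   enabled: true
  #   genSelfsigned: false
  #   useClusterIssuer: true
  #   clusterIssuer: my-cluster-issuer
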
## yugaware pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
  enabled: true
  ## fsGroup related values are set at the pod level.
  fsGroup: 10001
  fsGroupChangePolicy: "OnRootMismatch"
  ## runAsUser must be non-zero when runAsNonRoot is set to true,
  ## otherwise container creation fails.
  runAsUser: 10001
  runAsGroup: 10001
  runAsNonRoot: true

helm:
  timeout: 900
  packagePath: "/opt/yugabyte/helm"

domainName: "cluster.local"

helm2Legacy: false

ip_version_support: "v4_only" # v4_only and v6_only are the only supported values at the moment

rbac:
  ## Set this to false if you don't have enough permissions to create
  ## ClusterRole and Binding, for example on an OpenShift cluster. When
  ## set to false, some of the graphs in the Container section of the
  ## Metrics UI don't work.
  create: true

## In order to deploy on OpenShift Container Platform, set this to true.
ocpCompatibility:
  enabled: false

# Extra containers to add to the pod.
sidecars: []

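## Example (illustrative only): a placeholder sidecar container; the name, image,
## and command below are not chart defaults.
# sidecars:
# - name: debug-sidecar
#   image: busybox:1.36
#   command: ["sh", "-c", "sleep infinity"]
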
## The following two settings control pod placement: nodeSelector and AZ affinity.
## Note: Remember to also provide a yugaware.storageClass that has a volumeBindingMode of
## WaitForFirstConsumer so that the PVC is created in the right topology visible to this pod.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
## e.g.
## nodeSelector:
##   topology.kubernetes.io/region: us-west1
nodeSelector: {}

## Affinity to a particular zone for the pod.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## e.g.
## nodeAffinity:
##   requiredDuringSchedulingIgnoredDuringExecution:
##     nodeSelectorTerms:
##     - matchExpressions:
##       - key: failure-domain.beta.kubernetes.io/zone
##         operator: In
##         values:
##         - us-west1-a
##         - us-west1-b
zoneAffinity: {}

## The tolerations that the pod should have.
## See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []

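## Example (illustrative only): tolerate a hypothetical taint used for dedicated nodes.
# tolerations:
# - key: "dedicated"
#   operator: "Equal"
#   value: "yugaware"
#   effect: "NoSchedule"
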
## @param dnsPolicy DNS policy for the pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsPolicy: ClusterFirst
dnsPolicy: ""
## @param dnsConfig DNS configuration for the pod
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
## E.g.
## dnsConfig:
##   options:
##   - name: ndots
##     value: "4"
dnsConfig: {}

## In some cases (for example, cloud deployments) you may not want Prometheus to scrape nodes or evaluate alert rules.
prometheus:
  ## Setting this to false disables scraping of TServer and Master
  ## nodes (which can be pods or VMs).
  scrapeNodes: true
  evaluateAlertRules: true
  retentionTime: 15d
  queryConcurrency: 20
  queryMaxSamples: 5000000
  queryTimeout: 30s
  ## Set this to false to disable scraping of Kubernetes worker nodes.
  ## Setting this to false results in blank resource-utilization graphs
  ## for Kubernetes universes. Useful for scenarios where only VM based
  ## universes are being created.
  scrapeKubernetesNodes: true

  resources:
    requests:
      cpu: "2"
      memory: 4Gi

  ## Prometheus remote write config, as described here:
  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
  ## If a TLS configuration is needed, set prometheus.remoteWrite.tls.enabled = true and provide
  ## the necessary certificates/keys in base64 format as prometheus.remoteWrite.tls.[caCert|clientCert|clientKey].
  ## The remote write config should expect the certs/keys in
  ## /opt/remote_write/certs/[ca.crt|client.crt|client.key] respectively.
  remoteWrite:
    config: []
    tls:
      enabled: false
      ## base64 encoded certificates and key expected
      caCert: ""
      clientCert: ""
      clientKey: ""

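  ## Example (illustrative only): forwarding metrics to a hypothetical remote
  ## endpoint; the URL and credentials below are placeholders.
  # remoteWrite:
  #   config:
  #   - url: "https://metrics.example.com/api/v1/write"
  #     basic_auth:
  #       username: "prometheus"
  #       password: "changeme"
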
  ## Extra environment variables passed to the Prometheus container.
  ## Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#envvar-v1-core
  ## Example:
  # extraEnv:
  # - name: GOMEM
  #   value: "5Gi"
  extraEnv: []

  ## Enabling 'selfMonitor' allows Prometheus to collect and monitor its own metrics.
  selfMonitor: true

# Arbitrary key=value config entries for application.docker.conf
additionalAppConf:
  stringConf: {}
  nonStringConf: {}

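## Example (illustrative only): the keys below are placeholders, not documented
## application.docker.conf settings. Entries under stringConf hold values treated
## as strings, while nonStringConf holds booleans and numbers.
# additionalAppConf:
#   stringConf:
#     example.string.setting: "some value"
#   nonStringConf:
#     example.numeric.setting: 42
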
jdbcParams: ""

## Override the apiVersion of the policy API group used for
## PodDisruptionBudget resources. The chart selects the correct
## apiVersion based on the target Kubernetes cluster. You don't need
## to modify this unless you are running the helm template command (for
## example, via the GKE app's deployer image) against a Kubernetes cluster >= 1.21.
# pdbPolicyVersionOverride: "v1beta1"
pdbPolicyVersionOverride: ""

initContainers:
  prometheusConfiguration:
    resources:
      ## https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container
      ## Use the above link to learn more about Kubernetes resources configuration.
      requests:
        cpu: "0.25"
        memory: 500Mi

  postgresUpgrade:
    resources:
      requests:
        cpu: "0.5"
        memory: 500Mi

  dbInit:
    resources:
      requests:
        cpu: "0.25"
        memory: 500Mi

index.yaml

@@ -45281,6 +45281,32 @@ entries:
    - assets/yugabyte/yugabyte-2.14.11.tgz
    version: 2.14.11
  yugaware:
  - annotations:
      catalog.cattle.io/certified: partner
      catalog.cattle.io/display-name: YugabyteDB Anywhere
      catalog.cattle.io/kube-version: '>=1.18-0'
      catalog.cattle.io/release-name: yugaware
      charts.openshift.io/name: yugaware
    apiVersion: v2
    appVersion: 2024.1.3.0-b105
    created: "2024-10-23T12:40:25.808117116-06:00"
    description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring
      for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB
      cluster with multiple pods provided by Kubernetes or OpenShift and logically
      grouped together to form one logical distributed database.
    digest: 2e3c41d1c0a3cd872d08fa614a08eff2b604c34260ce6fce15dd39ecebf5ea34
    home: https://www.yugabyte.com
    icon: file://assets/icons/yugaware.jpg
    kubeVersion: '>=1.18-0'
    maintainers:
    - email: sanketh@yugabyte.com
      name: Sanketh Indarapu
    - email: gjalla@yugabyte.com
      name: Govardhan Reddy Jalla
    name: yugaware
    urls:
    - assets/yugabyte/yugaware-2024.1.3.tgz
    version: 2024.1.3
  - annotations:
      catalog.cattle.io/certified: partner
      catalog.cattle.io/display-name: YugabyteDB Anywhere

@@ -45489,32 +45515,6 @@ entries:
    urls:
    - assets/yugabyte/yugaware-2.18.4+1.tgz
    version: 2.18.4+1
  - annotations:
      catalog.cattle.io/certified: partner
      catalog.cattle.io/display-name: YugabyteDB Anywhere
      catalog.cattle.io/kube-version: '>=1.18-0'
      catalog.cattle.io/release-name: yugaware
      charts.openshift.io/name: yugaware
    apiVersion: v2
    appVersion: 2.18.3.1-b1
    created: "2023-09-22T15:03:28.347126179Z"
    description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring
      for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB
      cluster with multiple pods provided by Kubernetes or OpenShift and logically
      grouped together to form one logical distributed database.
    digest: f67c0ba761815d388746765a5b095166f274266075c586b0b00e6e07cbecd4c7
    home: https://www.yugabyte.com
    icon: file://assets/icons/yugaware.jpg
    kubeVersion: '>=1.18-0'
    maintainers:
    - email: sanketh@yugabyte.com
      name: Sanketh Indarapu
    - email: gjalla@yugabyte.com
      name: Govardhan Reddy Jalla
    name: yugaware
    urls:
    - assets/yugabyte/yugaware-2.18.3+1.tgz
    version: 2.18.3+1
  - annotations:
      catalog.cattle.io/certified: partner
      catalog.cattle.io/display-name: YugabyteDB Anywhere

@@ -45541,6 +45541,32 @@ entries:
    urls:
    - assets/yugabyte/yugaware-2.18.3+0.tgz
    version: 2.18.3+0
  - annotations:
      catalog.cattle.io/certified: partner
      catalog.cattle.io/display-name: YugabyteDB Anywhere
      catalog.cattle.io/kube-version: '>=1.18-0'
      catalog.cattle.io/release-name: yugaware
      charts.openshift.io/name: yugaware
    apiVersion: v2
    appVersion: 2.18.3.1-b1
    created: "2023-09-22T15:03:28.347126179Z"
    description: YugabyteDB Anywhere provides deployment, orchestration, and monitoring
      for managing YugabyteDB clusters. YugabyteDB Anywhere can create a YugabyteDB
      cluster with multiple pods provided by Kubernetes or OpenShift and logically
      grouped together to form one logical distributed database.
    digest: f67c0ba761815d388746765a5b095166f274266075c586b0b00e6e07cbecd4c7
    home: https://www.yugabyte.com
    icon: file://assets/icons/yugaware.jpg
    kubeVersion: '>=1.18-0'
    maintainers:
    - email: sanketh@yugabyte.com
      name: Sanketh Indarapu
    - email: gjalla@yugabyte.com
      name: Govardhan Reddy Jalla
    name: yugaware
    urls:
    - assets/yugabyte/yugaware-2.18.3+1.tgz
    version: 2.18.3+1
  - annotations:
      catalog.cattle.io/certified: partner
      catalog.cattle.io/display-name: YugabyteDB Anywhere

upstream.yaml

@@ -2,9 +2,5 @@ HelmRepo: https://charts.yugabyte.com
HelmChart: yugaware
Vendor: Yugabyte
DisplayName: YugabyteDB Anywhere
TrackVersions:
- 2.14
- 2.16
- 2.18
ChartMetadata:
  kubeVersion: '>=1.18-0'