rancher-partner-charts/charts/percona/pxc-db/production-values.yaml

# Example production-ready values for pxc-cluster.
# (You may still need to tune this for your own needs.)
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
finalizers:
- delete-pxc-pods-in-order
# ProxySQL PVCs can be deleted; they are recreatable.
- delete-proxysql-pvc
## Don't delete database PVCs.
# - delete-pxc-pvc
## Set this if you want to delete cert-manager certificates on cluster deletion
# - delete-ssl
nameOverride: "production"
fullnameOverride: "production"
operatorImageRepository: percona/percona-xtradb-cluster-operator
crVersion: 1.12.0
ignoreAnnotations: []
# - iam.amazonaws.com/role
ignoreLabels: []
# - rack
pause: false
initImage: ""
allowUnsafeConfigurations: false
updateStrategy: SmartUpdate
upgradeOptions:
versionServiceEndpoint: https://check.percona.com
apply: disabled
schedule: "0 4 * * *"
enableCRValidationWebhook: false
tls: {}
# SANs:
# - pxc-1.example.com
# - pxc-2.example.com
# - pxc-3.example.com
# issuerConf:
# name: special-selfsigned-issuer
# kind: ClusterIssuer
# group: cert-manager.io
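## If you set issuerConf, the referenced issuer has to exist first. A minimal
## sketch of a matching self-signed ClusterIssuer (name taken from the example
## above; adjust the kind and spec to your own PKI):
# apiVersion: cert-manager.io/v1
# kind: ClusterIssuer
# metadata:
#   name: special-selfsigned-issuer
# spec:
#   selfSigned: {}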
pxc:
size: 3
image:
repository: percona/percona-xtradb-cluster
tag: 8.0.29-21.1
# imagePullPolicy: Always
autoRecovery: true
# expose:
# enabled: true
# type: LoadBalancer
# trafficPolicy: Local
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# networking.gke.io/load-balancer-type: "Internal"
# replicationChannels:
# - name: pxc1_to_pxc2
# isSource: true
# - name: pxc2_to_pxc1
# isSource: false
# configuration:
# sourceRetryCount: 3
# sourceConnectRetry: 60
# ssl: false
# sslSkipVerify: true
# ca: '/etc/mysql/ssl/ca.crt'
# sourcesList:
# - host: 10.95.251.101
# port: 3306
# weight: 100
# schedulerName: mycustom-scheduler
imagePullSecrets: []
# - name: private-registry-credentials
annotations: {}
# iam.amazonaws.com/role: role-arn
labels: {}
# rack: rack-22
# priorityClassName: high-priority
readinessDelaySec: 15
livenessDelaySec: 300
## Uncomment to pass in a mysql config file
# configuration: |
# [mysqld]
# wsrep_debug=ON
# wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
# envVarsSecret: my-env-var-secrets
resources:
# Set these to the minimum you'd expect your database to need.
requests:
memory: 1G
cpu: 600m
# DB resources are sacred, so don't limit them.
limits: {}
# runtimeClassName: image-rc
sidecars: []
sidecarVolumes: []
sidecarPVCs: []
sidecarResources:
requests: {}
limits: {}
nodeSelector: {}
# disktype: ssd
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
tolerations: []
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
gracePeriod: 600
podDisruptionBudget:
# With only 3 nodes, don't let Kubernetes disrupt more than one at a time.
maxUnavailable: 1
# minAvailable: 0
persistence:
enabled: true
## set storage class if you need something fancy for your cloud.
# storageClass: "-"
accessMode: ReadWriteOnce
## Size this according to your expected data size. Resizing a PVC isn't easy,
## so don't be tight-fisted.
size: 8Gi
## Don't disable TLS, you monster
disableTLS: false
## You should use certManager. If you don't, you should create the certificates
## yourself; don't let Helm do it for you in prod.
certManager: true
## You should absolutely provide a pre-created secret here; don't rely on Helm
## to pass in passwords etc.
# clusterSecretName:
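## A minimal sketch of such a pre-created secret (the key names mirror the
## `secrets.passwords` entries at the bottom of this file; the secret name and
## values here are placeholders):
# apiVersion: v1
# kind: Secret
# metadata:
#   name: production-cluster-secrets
# type: Opaque
# stringData:
#   root: choose-a-strong-root-password
#   xtrabackup: choose-a-strong-xtrabackup-password
#   monitor: choose-a-strong-monitor-password
#   clustercheck: choose-a-strong-clustercheck-password
#   proxyadmin: choose-a-strong-proxyadmin-password
#   operator: choose-a-strong-operator-password
#   replication: choose-a-strong-replication-password
## ...then reference it above, e.g. `clusterSecretName: production-cluster-secrets`.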
readinessProbes:
initialDelaySeconds: 15
timeoutSeconds: 15
periodSeconds: 30
successThreshold: 1
failureThreshold: 5
livenessProbes:
initialDelaySeconds: 300
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
# A custom Kubernetes Security Context for a Container to be used instead of the default one
# containerSecurityContext:
# privileged: false
# A custom Kubernetes Security Context for a Pod to be used instead of the default one
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups:
# - 1001
# serviceAccountName: percona-xtradb-cluster-operator-workload
haproxy:
enabled: true
size: 3
image: ""
# imagePullPolicy: Always
imagePullSecrets: []
# - name: private-registry-credentials
# configuration: |
#
# The actual default configuration file can be found at https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
#
# global
# maxconn 2048
# external-check
# insecure-fork-wanted
# stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
#
# defaults
# default-server init-addr last,libc,none
# log global
# mode tcp
# retries 10
# timeout client 28800s
# timeout connect 100500
# timeout server 28800s
#
# frontend galera-in
# bind *:3309 accept-proxy
# bind *:3306
# mode tcp
# option clitcpka
# default_backend galera-nodes
#
# frontend galera-admin-in
# bind *:33062
# mode tcp
# option clitcpka
# default_backend galera-admin-nodes
#
# frontend galera-replica-in
# bind *:3307
# mode tcp
# option clitcpka
# default_backend galera-replica-nodes
#
# frontend galera-mysqlx-in
# bind *:33060
# mode tcp
# option clitcpka
# default_backend galera-mysqlx-nodes
#
# frontend stats
# bind *:8404
# mode http
# option http-use-htx
# http-request use-service prometheus-exporter if { path /metrics }
annotations: {}
# iam.amazonaws.com/role: role-arn
labels: {}
# rack: rack-22
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# runtimeClassName: image-rc
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
replicasServiceEnabled: true
# replicasLoadBalancerSourceRanges:
# - 10.0.0.0/8
# replicasLoadBalancerIP: 127.0.0.1
# replicasServiceType: ClusterIP
# replicasExternalTrafficPolicy: Cluster
# replicasServiceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# replicasServiceLabels:
# rack: rack-23
# priorityClassName: high-priority
# schedulerName: mycustom-scheduler
readinessDelaySec: 15
livenessDelaySec: 300
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 1G
cpu: 600m
limits: {}
# memory: 1G
# cpu: 600m
sidecars: []
sidecarVolumes: []
sidecarPVCs: []
sidecarResources:
requests: {}
limits: {}
nodeSelector: {}
# disktype: ssd
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
tolerations: []
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
gracePeriod: 30
# only one of `maxUnavailable` or `minAvailable` can be set.
podDisruptionBudget:
maxUnavailable: 1
# minAvailable: 0
readinessProbes:
initialDelaySeconds: 15
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
livenessProbes:
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 4
# A custom Kubernetes Security Context for a Container to be used instead of the default one
# containerSecurityContext:
# privileged: false
# A custom Kubernetes Security Context for a Pod to be used instead of the default one
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups:
# - 1001
proxysql:
enabled: false
size: 3
image: ""
# imagePullPolicy: Always
imagePullSecrets: []
# configuration: |
# datadir="/var/lib/proxysql"
#
# admin_variables =
# {
# admin_credentials="proxyadmin:admin_password"
# mysql_ifaces="0.0.0.0:6032"
# refresh_interval=2000
#
# cluster_username="proxyadmin"
# cluster_password="admin_password"
# checksum_admin_variables=false
# checksum_ldap_variables=false
# checksum_mysql_variables=false
# cluster_check_interval_ms=200
# cluster_check_status_frequency=100
# cluster_mysql_query_rules_save_to_disk=true
# cluster_mysql_servers_save_to_disk=true
# cluster_mysql_users_save_to_disk=true
# cluster_proxysql_servers_save_to_disk=true
# cluster_mysql_query_rules_diffs_before_sync=1
# cluster_mysql_servers_diffs_before_sync=1
# cluster_mysql_users_diffs_before_sync=1
# cluster_proxysql_servers_diffs_before_sync=1
# }
#
# mysql_variables=
# {
# monitor_password="monitor"
# monitor_galera_healthcheck_interval=1000
# threads=2
# max_connections=2048
# default_query_delay=0
# default_query_timeout=10000
# poll_timeout=2000
# interfaces="0.0.0.0:3306"
# default_schema="information_schema"
# stacksize=1048576
# connect_timeout_server=10000
# monitor_history=60000
# monitor_connect_interval=20000
# monitor_ping_interval=10000
# ping_timeout_server=200
# commands_stats=true
# sessions_sort=true
# have_ssl=true
# ssl_p2s_ca="/etc/proxysql/ssl-internal/ca.crt"
# ssl_p2s_cert="/etc/proxysql/ssl-internal/tls.crt"
# ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
# ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
# }
# - name: private-registry-credentials
annotations: {}
# iam.amazonaws.com/role: role-arn
labels: {}
# rack: rack-22
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# runtimeClassName: image-rc
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
# priorityClassName: high-priority
# schedulerName: mycustom-scheduler
readinessDelaySec: 15
livenessDelaySec: 300
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 1G
cpu: 600m
limits: {}
# memory: 1G
# cpu: 600m
sidecars: []
sidecarVolumes: []
sidecarPVCs: []
sidecarResources:
requests: {}
limits: {}
nodeSelector: {}
# disktype: ssd
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
tolerations: []
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
gracePeriod: 30
# only one of `maxUnavailable` or `minAvailable` can be set.
podDisruptionBudget:
maxUnavailable: 1
# minAvailable: 0
persistence:
enabled: true
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
# A custom Kubernetes Security Context for a Container to be used instead of the default one
# containerSecurityContext:
# privileged: false
# A custom Kubernetes Security Context for a Pod to be used instead of the default one
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups:
# - 1001
logcollector:
enabled: true
image: ""
# imagePullPolicy: Always
imagePullSecrets: []
# configuration: |
# [OUTPUT]
# Name es
# Match *
# Host 192.168.2.3
# Port 9200
# Index my_index
# Type my_type
resources:
requests:
memory: 100M
cpu: 200m
limits: {}
pmm:
enabled: false
image:
repository: percona/pmm-client
tag: 2.32.0
# imagePullPolicy: Always
imagePullSecrets: []
serverHost: monitoring-service
serverUser: admin
resources:
requests:
memory: 150M
cpu: 300m
limits: {}
backup:
enabled: true
image: ""
# backoffLimit: 6
# serviceAccountName: percona-xtradb-cluster-operator
# imagePullPolicy: Always
imagePullSecrets: []
# - name: private-registry-credentials
pitr:
enabled: false
storageName: s3-us-west-binlogs
timeBetweenUploads: 60
resources:
requests: {}
limits: {}
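## Note: `storageName` above must reference a storage defined under `storages`
## below (e.g. the commented s3-us-west-binlogs example) before PITR is enabled.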
storages:
fs-pvc:
type: filesystem
volume:
persistentVolumeClaim:
# storageClassName: standard
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 6Gi
## Set up backups to your S3 bucket!!!
# s3-us-west:
# type: s3
# verifyTLS: true
# nodeSelector:
# storage: tape
# backupWorker: 'True'
# resources:
# requests:
# memory: 1G
# cpu: 600m
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: backupWorker
# operator: In
# values:
# - 'True'
# tolerations:
# - key: "backupWorker"
# operator: "Equal"
# value: "True"
# effect: "NoSchedule"
# annotations:
# testName: scheduled-backup
# labels:
# backupWorker: 'True'
# schedulerName: 'default-scheduler'
# priorityClassName: 'high-priority'
# containerSecurityContext:
# privileged: true
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups: [1001, 1002, 1003]
# s3:
# bucket: S3-BACKUP-BUCKET-NAME-HERE
# # Use credentialsSecret OR credentialsAccessKey/credentialsSecretKey
# credentialsSecret: my-cluster-name-backup-s3
# #credentialsAccessKey: REPLACE-WITH-AWS-ACCESS-KEY
# #credentialsSecretKey: REPLACE-WITH-AWS-SECRET-KEY
# region: us-west-2
# endpointUrl: https://sfo2.digitaloceanspaces.com
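## The credentialsSecret referenced above must exist before a backup runs.
## A minimal sketch (key names as the operator expects them; the values are
## placeholders):
# apiVersion: v1
# kind: Secret
# metadata:
#   name: my-cluster-name-backup-s3
# type: Opaque
# stringData:
#   AWS_ACCESS_KEY_ID: REPLACE-WITH-AWS-ACCESS-KEY
#   AWS_SECRET_ACCESS_KEY: REPLACE-WITH-AWS-SECRET-KEY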
# s3-us-west-binlogs:
# type: s3
# s3:
# bucket: S3-BACKUP-BUCKET-NAME-HERE/DIRECTORY
# credentialsSecret: my-cluster-name-backup-s3
# region: us-west-2
# endpointUrl: https://sfo2.digitaloceanspaces.com
# azure-blob:
# type: azure
# azure:
# credentialsSecret: azure-secret
# container: test
# endpointUrl: https://accountName.blob.core.windows.net
# storageClass: Hot
schedule:
- name: "daily-backup"
schedule: "0 0 * * *"
keep: 5
storageName: fs-pvc
## Schedule s3 backups!!
# - name: "sat-night-backup"
# schedule: "0 0 * * 6"
# keep: 3
# storageName: s3-us-west
secrets:
## You should override these with your own values, or point clusterSecretName at a pre-created secret.
passwords:
root: insecure-root-password
xtrabackup: insecure-xtrabackup-password
monitor: insecure-monitor-password
clustercheck: insecure-clustercheck-password
proxyadmin: insecure-proxyadmin-password
pmmserver: insecure-pmmserver-password
# If pmmserverkey is set, the pmmserver password will not be included.
# pmmserverkey: set-pmmserver-api-key
operator: insecure-operator-password
replication: insecure-replication-password
## If you are using `cert-manager` you can skip this next section.
## If not using cert-manager, you should set these!!!!
tls: {}
# This should be the name of a secret that contains certificates.
# it should have the following keys: `ca.crt`, `tls.crt`, `tls.key`
# If not set the Helm chart will attempt to create certificates
# for you [not recommended for prod]:
# cluster:
# This should be the name of a secret that contains certificates.
# it should have the following keys: `ca.crt`, `tls.crt`, `tls.key`
# If not set the Helm chart will attempt to create certificates
# for you [not recommended for prod]:
# internal:
# logCollector: cluster1-log-collector-secrets
# vault: keyring-secret-vault
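## Example of pointing these at pre-created certificate secrets instead (a
## sketch; the secret names are hypothetical, and each secret must contain
## `ca.crt`, `tls.crt` and `tls.key`):
# tls:
#   cluster: production-ssl
#   internal: production-ssl-internal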