# Copyright VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##

## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  storageClass: ""

## @section Common parameters
##

## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template
##
fullnameOverride: ""
## @param namespaceOverride String to fully override common.names.namespace
##
namespaceOverride: ""
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Kubernetes cluster domain name
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @param initScripts Dictionary of init scripts. Evaluated as a template.
## Specify a dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
## For example:
## initScripts:
##   my_init_script.sh: |
##     #!/bin/sh
##     echo "Do something."
##
initScripts: {}
## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template.
## Note: This will override initScripts
##
initScriptsCM: ""
## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template.
##
initScriptsSecret: ""
## Enable diagnostic mode in the deployment
##
diagnosticMode:
  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
  ##
  enabled: false
  ## @param diagnosticMode.command Command to override all containers in the deployment
  ##
  command:
    - sleep
  ## @param diagnosticMode.args Args to override all containers in the deployment
  ##
  args:
    - infinity
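
## A minimal sketch of enabling diagnostic mode at upgrade time (illustrative;
## "my-release" is a hypothetical release name and "bitnami/spark" a repo alias):
##   helm upgrade my-release bitnami/spark --set diagnosticMode.enabled=true
## With diagnostic mode on, pods keep running "sleep infinity" so you can
## "kubectl exec" into them for troubleshooting.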
## @section Spark parameters
##

## Bitnami Spark image version
## ref: https://hub.docker.com/r/bitnami/spark/tags/
## @param image.registry Spark image registry
## @param image.repository Spark image repository
## @param image.tag Spark image tag (immutable tags are recommended)
## @param image.digest Spark image digest in the format sha256:aa.... Please note this parameter, if set, will override the tag
## @param image.pullPolicy Spark image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
## @param image.debug Enable image debug mode
##
image:
  registry: docker.io
  repository: bitnami/spark
  tag: 3.4.1-debian-11-r6
  digest: ""
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## e.g:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Set to true if you would like to see extra information in the logs
  ## It turns on BASH and/or NAMI debugging in the image
  ##
  debug: false
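
## A sketch of pulling the image from a private registry instead (illustrative;
## the registry and secret names below are hypothetical):
## image:
##   registry: registry.example.com
##   repository: bitnami/spark
##   pullSecrets:
##     - my-registry-secret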
## @param hostNetwork Enable HOST Network
## If hostNetwork is true, then dnsPolicy is set to ClusterFirstWithHostNet
##
hostNetwork: false

## @section Spark master parameters
##

## Spark master specific configuration
##
master:
  ## @param master.existingConfigmap The name of an existing ConfigMap with your custom configuration for master
  ##
  existingConfigmap: ""
  ## @param master.containerPorts.http Specify the port where the web interface will listen on the master over HTTP
  ## @param master.containerPorts.https Specify the port where the web interface will listen on the master over HTTPS
  ## @param master.containerPorts.cluster Specify the port where the master listens to communicate with workers
  ##
  containerPorts:
    http: 8080
    https: 8480
    cluster: 7077
  ## @param master.hostAliases Deployment pod host aliases
  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
  ##
  hostAliases: []
  ## @param master.extraContainerPorts Specify the port where the running jobs inside the masters listen
  ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#containerport-v1-core
  ## e.g:
  ## - name: myapp
  ##   containerPort: 8000
  ##   protocol: TCP
  ##
  extraContainerPorts: []
  ## @param master.daemonMemoryLimit Set the memory limit for the master daemon
  ##
  daemonMemoryLimit: ""
  ## @param master.configOptions Use a string to set the config options in the form "-Dx=y"
  ##
  configOptions: ""
  ## @param master.extraEnvVars Extra environment variables to pass to the master container
  ## For example:
  ## extraEnvVars:
  ##   - name: SPARK_DAEMON_JAVA_OPTS
  ##     value: -Dx=y
  ##
  extraEnvVars: []
  ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for master nodes
  ##
  extraEnvVarsCM: ""
  ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for master nodes
  ##
  extraEnvVarsSecret: ""
  ## Kubernetes Pods Security Context
  ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  ## @param master.podSecurityContext.enabled Enable security context
  ## @param master.podSecurityContext.fsGroup Set master pod's Security Context Group ID
  ## @param master.podSecurityContext.runAsUser Set master pod's Security Context User ID
  ## @param master.podSecurityContext.runAsGroup Set master pod's Security Context Group ID
  ## @param master.podSecurityContext.seLinuxOptions Set master pod's Security Context SELinux options
  ##
  podSecurityContext:
    enabled: true
    fsGroup: 1001
    runAsUser: 1001
    runAsGroup: 0
    seLinuxOptions: {}
  ## Configure Container Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param master.containerSecurityContext.enabled Enable master containers' Security Context
  ## @param master.containerSecurityContext.runAsUser Set master containers' Security Context runAsUser
  ## @param master.containerSecurityContext.runAsNonRoot Set master containers' Security Context runAsNonRoot
  ## @param master.containerSecurityContext.readOnlyRootFilesystem Set master containers' Security Context readOnlyRootFilesystem
  ##
  containerSecurityContext:
    enabled: true
    runAsUser: 1001
    runAsNonRoot: true
    readOnlyRootFilesystem: false
  ## @param master.command Override default container command (useful when using custom images)
  ##
  command: []
  ## @param master.args Override default container args (useful when using custom images)
  ##
  args: []
  ## @param master.podAnnotations Annotations for pods in StatefulSet
  ##
  podAnnotations: {}
  ## @param master.podLabels Extra labels for pods in StatefulSet
  ##
  podLabels: {}
  ## @param master.podAffinityPreset Spark master pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param master.podAntiAffinityPreset Spark master pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Spark master node affinity preset
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param master.nodeAffinityPreset.type Spark master node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param master.nodeAffinityPreset.key Spark master node label key to match. Ignored if `master.affinity` is set.
    ## E.g.
    ## key: "kubernetes.io/e2e-az-name"
    ##
    key: ""
    ## @param master.nodeAffinityPreset.values Spark master node label values to match. Ignored if `master.affinity` is set.
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param master.affinity Spark master affinity for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## Note: master.podAffinityPreset, master.podAntiAffinityPreset, and master.nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param master.nodeSelector Spark master node labels for pod assignment
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param master.tolerations Spark master tolerations for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param master.updateStrategy.type Master statefulset strategy type.
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  ## e.g:
  ## updateStrategy:
  ##   type: RollingUpdate
  ##   rollingUpdate:
  ##     maxSurge: 25%
  ##     maxUnavailable: 25%
  ##
  updateStrategy:
    type: RollingUpdate
  ## @param master.priorityClassName master pods' priorityClassName
  ##
  priorityClassName: ""
  ## @param master.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
  ##
  topologySpreadConstraints: []
  ## @param master.schedulerName Name of the k8s scheduler (other than default) for master pods
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param master.terminationGracePeriodSeconds Seconds the Spark master pod needs to terminate gracefully
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
  ##
  terminationGracePeriodSeconds: ""
  ## @param master.lifecycleHooks LifecycleHooks for the master container(s) to automate configuration before or after startup
  ##
  lifecycleHooks: {}
  ## @param master.extraVolumes Optionally specify extra list of additional volumes for the master pod(s)
  ##
  extraVolumes: []
  ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the master container(s)
  ##
  extraVolumeMounts: []
  ## @param master.extraVolumeClaimTemplates Optionally specify extra list of volumesClaimTemplates for the master statefulset
  ##
  extraVolumeClaimTemplates: []
  ## Container resource requests and limits
  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
  ## We usually recommend not to specify default resources and to leave this as a conscious
  ## choice for the user. This also increases chances charts run on environments with little
  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  ## @param master.resources.limits The resources limits for the container
  ## @param master.resources.requests The requested resources for the container
  ##
  resources:
    ## Example:
    ## limits:
    ##   cpu: 250m
    ##   memory: 256Mi
    ##
    limits: {}
    ## Example:
    ## requests:
    ##   cpu: 250m
    ##   memory: 256Mi
    ##
    requests: {}
  ## Configure extra options for liveness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param master.livenessProbe.enabled Enable livenessProbe
  ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
  ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe
  ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
  ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe
  ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe
  ##
  livenessProbe:
    enabled: true
    initialDelaySeconds: 180
    periodSeconds: 20
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for readiness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param master.readinessProbe.enabled Enable readinessProbe
  ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
  ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe
  ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
  ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe
  ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe
  ##
  readinessProbe:
    enabled: true
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for startup probe
  ## @param master.startupProbe.enabled Enable startupProbe
  ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
  ## @param master.startupProbe.periodSeconds Period seconds for startupProbe
  ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe
  ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe
  ## @param master.startupProbe.successThreshold Success threshold for startupProbe
  ##
  startupProbe:
    enabled: false
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one
  ##
  customLivenessProbe: {}
  ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one
  ##
  customReadinessProbe: {}
  ## @param master.customStartupProbe Custom startupProbe that overrides the default one
  ##
  customStartupProbe: {}
  ## @param master.sidecars Add additional sidecar containers to the master pod(s)
  ## e.g:
  ## sidecars:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  sidecars: []
  ## @param master.initContainers Add initContainers to the master pods.
  ## Example:
  ## initContainers:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  initContainers: []
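
  ## A minimal sketch of a master.customLivenessProbe overriding the default probe
  ## above (illustrative only; it assumes the master web UI answers on the "http"
  ## container port defined in master.containerPorts):
  ## customLivenessProbe:
  ##   httpGet:
  ##     path: /
  ##     port: http
  ##   initialDelaySeconds: 60
  ##   periodSeconds: 30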
## @section Spark worker parameters
##

## Spark worker specific configuration
##
worker:
  ## @param worker.existingConfigmap The name of an existing ConfigMap with your custom configuration for workers
  ##
  existingConfigmap: ""
  ## @param worker.containerPorts.http Specify the port where the web interface will listen on the worker over HTTP
  ## @param worker.containerPorts.https Specify the port where the web interface will listen on the worker over HTTPS
  ## @param worker.containerPorts.cluster Specify the port where the worker listens to communicate with the master
  ##
  containerPorts:
    http: 8080
    https: 8480
    cluster: ""
  ## @param worker.hostAliases Add deployment host aliases
  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
  ##
  hostAliases: []
  ## @param worker.extraContainerPorts Specify the port where the running jobs inside the workers listen
  ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#containerport-v1-core
  ## e.g:
  ## - name: myapp
  ##   containerPort: 8000
  ##   protocol: TCP
  ##
  extraContainerPorts: []
  ## @param worker.daemonMemoryLimit Set the memory limit for the worker daemon
  ##
  daemonMemoryLimit: ""
  ## @param worker.memoryLimit Set the maximum memory the worker is allowed to use
  ##
  memoryLimit: ""
  ## @param worker.coreLimit Set the maximum number of cores that the worker can use
  ##
  coreLimit: ""
  ## @param worker.dir Set a custom working directory for the application
  ##
  dir: ""
  ## @param worker.javaOptions Set options for the JVM in the form `-Dx=y`
  ##
  javaOptions: ""
  ## @param worker.configOptions Set extra options to configure the worker in the form `-Dx=y`
  ##
  configOptions: ""
  ## @param worker.extraEnvVars An array to add extra env vars
  ## For example:
  ## extraEnvVars:
  ##   - name: SPARK_DAEMON_JAVA_OPTS
  ##     value: -Dx=y
  ##
  extraEnvVars: []
  ## @param worker.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for worker nodes
  ##
  extraEnvVarsCM: ""
  ## @param worker.extraEnvVarsSecret Name of existing Secret containing extra env vars for worker nodes
  ##
  extraEnvVarsSecret: ""
  ## @param worker.replicaCount Number of spark workers (will be the minimum number when autoscaling is enabled)
  ##
  replicaCount: 2
  ## Kubernetes Pods Security Context
  ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  ## @param worker.podSecurityContext.enabled Enable security context
  ## @param worker.podSecurityContext.fsGroup Group ID for the container
  ## @param worker.podSecurityContext.runAsUser User ID for the container
  ## @param worker.podSecurityContext.runAsGroup Group ID for the container
  ## @param worker.podSecurityContext.seLinuxOptions SELinux options for the container
  ##
  podSecurityContext:
    enabled: true
    fsGroup: 1001
    runAsUser: 1001
    runAsGroup: 0
    seLinuxOptions: {}
  ## Configure Container Security Context
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
  ## @param worker.containerSecurityContext.enabled Enable worker containers' Security Context
  ## @param worker.containerSecurityContext.runAsUser Set worker containers' Security Context runAsUser
  ## @param worker.containerSecurityContext.runAsNonRoot Set worker containers' Security Context runAsNonRoot
  ## @param worker.containerSecurityContext.readOnlyRootFilesystem Set worker containers' Security Context readOnlyRootFilesystem
  ##
  containerSecurityContext:
    enabled: true
    runAsUser: 1001
    runAsNonRoot: true
    readOnlyRootFilesystem: false
  ## @param worker.command Override default container command (useful when using custom images)
  ##
  command: []
  ## @param worker.args Override default container args (useful when using custom images)
  ##
  args: []
  ## @param worker.podAnnotations Annotations for pods in StatefulSet
  ##
  podAnnotations: {}
  ## @param worker.podLabels Extra labels for pods in StatefulSet
  ##
  podLabels: {}
  ## @param worker.podAffinityPreset Spark worker pod affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param worker.podAntiAffinityPreset Spark worker pod anti-affinity preset. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Spark worker node affinity preset
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param worker.nodeAffinityPreset.type Spark worker node affinity preset type. Ignored if `worker.affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param worker.nodeAffinityPreset.key Spark worker node label key to match. Ignored if `worker.affinity` is set.
    ## E.g.
    ## key: "kubernetes.io/e2e-az-name"
    ##
    key: ""
    ## @param worker.nodeAffinityPreset.values Spark worker node label values to match. Ignored if `worker.affinity` is set.
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param worker.affinity Spark worker affinity for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## Note: worker.podAffinityPreset, worker.podAntiAffinityPreset, and worker.nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param worker.nodeSelector Spark worker node labels for pod assignment
  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  ##
  nodeSelector: {}
  ## @param worker.tolerations Spark worker tolerations for pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param worker.updateStrategy.type Worker statefulset strategy type.
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  ## e.g:
  ## updateStrategy:
  ##   type: RollingUpdate
  ##   rollingUpdate:
  ##     maxSurge: 25%
  ##     maxUnavailable: 25%
  ##
  updateStrategy:
    type: RollingUpdate
  ## @param worker.podManagementPolicy Statefulset Pod Management Policy Type
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
  ##
  podManagementPolicy: OrderedReady
  ## @param worker.priorityClassName worker pods' priorityClassName
  ##
  priorityClassName: ""
  ## @param worker.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
  ##
  topologySpreadConstraints: []
  ## @param worker.schedulerName Name of the k8s scheduler (other than default) for worker pods
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param worker.terminationGracePeriodSeconds Seconds the Spark worker pod needs to terminate gracefully
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
  ##
  terminationGracePeriodSeconds: ""
  ## @param worker.lifecycleHooks LifecycleHooks for the worker container(s) to automate configuration before or after startup
  ##
  lifecycleHooks: {}
  ## @param worker.extraVolumes Optionally specify extra list of additional volumes for the worker pod(s)
  ##
  extraVolumes: []
  ## @param worker.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the worker container(s)
  ##
  extraVolumeMounts: []
  ## @param worker.extraVolumeClaimTemplates Optionally specify extra list of volumesClaimTemplates for the worker statefulset
  ##
  extraVolumeClaimTemplates: []
  ## Container resource requests and limits
  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
  ## We usually recommend not to specify default resources and to leave this as a conscious
  ## choice for the user. This also increases chances charts run on environments with little
  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  ## @param worker.resources.limits The resources limits for the container
  ## @param worker.resources.requests The requested resources for the container
  ##
  resources:
    ## Example:
    ## limits:
    ##   cpu: 250m
    ##   memory: 256Mi
    ##
    limits: {}
    ## Example:
    ## requests:
    ##   cpu: 250m
    ##   memory: 256Mi
    ##
    requests: {}
  ## Configure extra options for liveness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param worker.livenessProbe.enabled Enable livenessProbe
  ## @param worker.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
  ## @param worker.livenessProbe.periodSeconds Period seconds for livenessProbe
  ## @param worker.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
  ## @param worker.livenessProbe.failureThreshold Failure threshold for livenessProbe
  ## @param worker.livenessProbe.successThreshold Success threshold for livenessProbe
  ##
  livenessProbe:
    enabled: true
    initialDelaySeconds: 180
    periodSeconds: 20
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for readiness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param worker.readinessProbe.enabled Enable readinessProbe
  ## @param worker.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
  ## @param worker.readinessProbe.periodSeconds Period seconds for readinessProbe
  ## @param worker.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
  ## @param worker.readinessProbe.failureThreshold Failure threshold for readinessProbe
  ## @param worker.readinessProbe.successThreshold Success threshold for readinessProbe
  ##
  readinessProbe:
    enabled: true
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for startup probe
  ## @param worker.startupProbe.enabled Enable startupProbe
  ## @param worker.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
  ## @param worker.startupProbe.periodSeconds Period seconds for startupProbe
  ## @param worker.startupProbe.timeoutSeconds Timeout seconds for startupProbe
  ## @param worker.startupProbe.failureThreshold Failure threshold for startupProbe
  ## @param worker.startupProbe.successThreshold Success threshold for startupProbe
  ##
  startupProbe:
    enabled: true
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## @param worker.customLivenessProbe Custom livenessProbe that overrides the default one
  ##
  customLivenessProbe: {}
  ## @param worker.customReadinessProbe Custom readinessProbe that overrides the default one
  ##
  customReadinessProbe: {}
  ## @param worker.customStartupProbe Custom startupProbe that overrides the default one
  ##
  customStartupProbe: {}
  ## @param worker.sidecars Add additional sidecar containers to the worker pod(s)
  ## e.g:
  ## sidecars:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  sidecars: []
  ## @param worker.initContainers Add initContainers to the worker pods.
  ## Example:
  ## initContainers:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  initContainers: []
  ## Autoscaling parameters
  ## @param worker.autoscaling.enabled Enable replica autoscaling depending on CPU
  ## @param worker.autoscaling.minReplicas Minimum number of worker replicas
  ## @param worker.autoscaling.maxReplicas Maximum number of worker replicas
  ## @param worker.autoscaling.targetCPU Target CPU utilization percentage
  ## @param worker.autoscaling.targetMemory Target Memory utilization percentage
  ##
  autoscaling:
    enabled: false
    minReplicas: ""
    maxReplicas: 5
    targetCPU: 50
    targetMemory: ""
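
  ## e.g., CPU-based autoscaling between 2 and 10 workers (illustrative values,
  ## built only from the parameters above):
  ## autoscaling:
  ##   enabled: true
  ##   minReplicas: 2
  ##   maxReplicas: 10
  ##   targetCPU: 60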
## @section Security parameters
##

## Security configuration
##
security:
  ## @param security.passwordsSecretName Name of the secret that contains all the passwords
  ## This is optional, by default random passwords are generated
  ##
  passwordsSecretName: ""
  ## RPC configuration
  ## @param security.rpc.authenticationEnabled Enable the RPC authentication
  ## @param security.rpc.encryptionEnabled Enable the encryption for RPC
  ##
  rpc:
    authenticationEnabled: false
    encryptionEnabled: false
  ## @param security.storageEncryptionEnabled Enables local storage encryption
  ##
  storageEncryptionEnabled: false
  ## @param security.certificatesSecretName Name of the secret that contains the certificates.
  ## It should contain two keys called "spark-keystore.jks" and "spark-truststore.jks" with the files in JKS format.
  ## DEPRECATED. Use `security.ssl.existingSecret` instead
  ##
  certificatesSecretName: ""
  ## SSL configuration
  ##
  ssl:
    ## @param security.ssl.enabled Enable the SSL configuration
    ##
    enabled: false
    ## @param security.ssl.needClientAuth Enable the client authentication
    ##
    needClientAuth: false
    ## @param security.ssl.protocol Set the SSL protocol
    ##
    protocol: TLSv1.2
    ## @param security.ssl.existingSecret Name of the existing secret containing the TLS certificates
    ## It should contain two keys called "spark-keystore.jks" and "spark-truststore.jks" with the files in JKS format.
    ##
    existingSecret: ""
    ## @param security.ssl.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates
    ## The Spark container will generate a JKS keystore and truststore using the PEM certificates.
    ##
    autoGenerated: false
    ## @param security.ssl.keystorePassword Set the password of the JKS Keystore
    ##
    keystorePassword: ""
    ## @param security.ssl.truststorePassword Truststore password.
    ##
    truststorePassword: ""
    ## Container resource requests and limits
    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
    ## We usually recommend not to specify default resources and to leave this as a conscious
    ## choice for the user. This also increases chances charts run on environments with little
    ## resources, such as Minikube. If you do want to specify resources, uncomment the following
    ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    ## @param security.ssl.resources.limits The resources limits for the container
    ## @param security.ssl.resources.requests The requested resources for the container
    ##
    resources:
      ## Example:
      ## limits:
      ##   cpu: 100m
      ##   memory: 128Mi
      ##
      limits: {}
      ## Example:
      ## requests:
      ##   cpu: 100m
      ##   memory: 128Mi
      ##
      requests: {}
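
## A sketch of enabling RPC authentication plus TLS from an existing JKS secret
## (illustrative; "my-spark-certs" is a hypothetical secret holding the
## "spark-keystore.jks" and "spark-truststore.jks" keys described above):
## security:
##   rpc:
##     authenticationEnabled: true
##     encryptionEnabled: true
##   ssl:
##     enabled: true
##     existingSecret: my-spark-certs
##     keystorePassword: my-keystore-password
##     truststorePassword: my-truststore-password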
## @section Traffic Exposure parameters
##

## Service parameters
##
service:
  ## @param service.type Kubernetes Service type
  ##
  type: ClusterIP
  ## @param service.ports.http Spark client port for HTTP
  ## @param service.ports.https Spark client port for HTTPS
  ## @param service.ports.cluster Spark cluster port
  ##
  ports:
    http: 80
    https: 443
    cluster: 7077
  ## Specify the nodePort(s) value(s) for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ## @param service.nodePorts.http Kubernetes web node port for HTTP
  ## @param service.nodePorts.https Kubernetes web node port for HTTPS
  ## @param service.nodePorts.cluster Kubernetes cluster node port
  ##
  nodePorts:
    http: ""
    https: ""
    cluster: ""
  ## @param service.clusterIP Spark service Cluster IP
  ## e.g.:
  ## clusterIP: None
  ##
  clusterIP: ""
  ## @param service.loadBalancerIP Load balancer IP if spark service type is `LoadBalancer`
  ## Set the LoadBalancer service type to internal only
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  loadBalancerIP: ""
  ## @param service.loadBalancerSourceRanges Spark service Load Balancer sources
  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ## e.g:
  ## loadBalancerSourceRanges:
  ##   - 10.10.10.0/24
  ##
  loadBalancerSourceRanges: []
  ## @param service.externalTrafficPolicy Spark service external traffic policy
  ## ref: http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
  ##
  externalTrafficPolicy: Cluster
  ## @param service.annotations Additional custom annotations for Spark service
  ##
  annotations: {}
  ## @param service.extraPorts Extra ports to expose in Spark service (normally used with the `sidecars` value)
  ##
  extraPorts: []
  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
  ## Values: ClientIP or None
  ## ref: https://kubernetes.io/docs/user-guide/services/
  ##
  sessionAffinity: None
  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
  ## sessionAffinityConfig:
  ##   clientIP:
  ##     timeoutSeconds: 300
  ##
  sessionAffinityConfig: {}
  ## Headless service properties
  ##
  headless:
    ## @param service.headless.annotations Annotations for the headless service.
    ##
    annotations: {}
## Configure the ingress resource that allows you to access the
## Spark installation. Set up the URL
## ref: https://kubernetes.io/docs/user-guide/ingress/
##
ingress:
  ## @param ingress.enabled Enable ingress controller resource
  ##
  enabled: false
  ## @param ingress.pathType Ingress path type
  ##
  pathType: ImplementationSpecific
  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
  ##
  apiVersion: ""
  ## @param ingress.hostname Default host for the ingress resource
  ##
  hostname: spark.local
  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
  ##
  ingressClassName: ""
  ## @param ingress.path The path to Spark. You may need to set this to '/*' in order to use this with ALB ingress controllers.
  ##
  path: /
  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
  ## Use this parameter to set the required annotations for cert-manager, see
  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
  ##
  ## e.g:
  ## annotations:
  ##   kubernetes.io/ingress.class: nginx
  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
  ##
  annotations: {}
  ## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
  ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
  ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
  ##
  tls: false
  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
  ##
  selfSigned: false
  ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
  ## extraHosts:
  ##   - name: spark.local
  ##     path: /
  ##
  extraHosts: []
  ## @param ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host.
  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
  ## extraPaths:
  ##   - path: /*
  ##     backend:
  ##       serviceName: ssl-redirect
  ##       servicePort: use-annotation
  ##
  extraPaths: []
  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## extraTls:
  ##   - hosts:
  ##       - spark.local
  ##     secretName: spark.local-tls
  ##
  extraTls: []
  ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
  ## The certificate should start with -----BEGIN CERTIFICATE----- and the
  ## key with -----BEGIN RSA PRIVATE KEY-----
  ##
  ## The name should line up with a tlsSecret set further up
  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ## e.g:
  ## secrets:
  ##   - name: spark.local-tls
  ##     key:
  ##     certificate:
  ##
  secrets: []
  ## @param ingress.extraRules Additional rules to be covered with this ingress record
  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
  ## e.g:
  ## extraRules:
  ##   - host: spark.local
  ##     http:
  ##       path: /
  ##       backend:
  ##         service:
  ##           name: spark-svc
  ##           port:
  ##             name: http
  ##
  extraRules: []
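
## A sketch of exposing the master web UI through an NGINX ingress with a
## cert-manager certificate (illustrative; the hostname, class name, and issuer
## below are hypothetical):
## ingress:
##   enabled: true
##   hostname: spark.example.com
##   ingressClassName: nginx
##   tls: true
##   annotations:
##     cert-manager.io/cluster-issuer: my-cluster-issuer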
## @section Other parameters
##

## ServiceAccount configuration
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## @param serviceAccount.create Enable the creation of a ServiceAccount for Spark pods
  ##
  create: true
  ## @param serviceAccount.name The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the spark.fullname template
  ##
  name: ""
  ## @param serviceAccount.annotations Annotations for Spark Service Account
  ##
  annotations: {}
  ## @param serviceAccount.automountServiceAccountToken Automount API credentials for a service account.
  ##
  automountServiceAccountToken: true

## @section Metrics parameters
##

## Metrics configuration
##
metrics:
  ## @param metrics.enabled Start a side-car prometheus exporter
  ##
  enabled: false
  ## @param metrics.masterAnnotations [object] Annotations for the Prometheus metrics on master nodes
  ##
  masterAnnotations:
    prometheus.io/scrape: 'true'
    prometheus.io/path: '/metrics/'
    prometheus.io/port: '{{ .Values.master.containerPorts.http }}'
  ## @param metrics.workerAnnotations [object] Annotations for the Prometheus metrics on worker nodes
  ##
  workerAnnotations:
    prometheus.io/scrape: 'true'
    prometheus.io/path: '/metrics/'
    prometheus.io/port: '{{ .Values.worker.containerPorts.http }}'
  ## Prometheus Service Monitor
  ## ref: https://github.com/coreos/prometheus-operator
  ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
  ##
  podMonitor:
    ## @param metrics.podMonitor.enabled If the operator is installed in your cluster, set to true to create a PodMonitor Resource for scraping metrics using PrometheusOperator
    ##
    enabled: false
    ## @param metrics.podMonitor.extraMetricsEndpoints Add metrics endpoints for monitoring the jobs running in the worker nodes
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmetricsendpoint
    ## e.g:
    ## - port: myapp
    ##   path: /metrics/
    ##
    extraMetricsEndpoints: []
    ## @param metrics.podMonitor.namespace Specify the namespace in which the podMonitor resource will be created
    ##
    namespace: ""
    ## @param metrics.podMonitor.interval Specify the interval at which metrics should be scraped
    ##
    interval: 30s
    ## @param metrics.podMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
    ## e.g:
    ## scrapeTimeout: 30s
    ##
    scrapeTimeout: ""
    ## @param metrics.podMonitor.additionalLabels Additional labels that can be used so PodMonitors will be discovered by Prometheus
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
    ##
    additionalLabels: {}
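
  ## e.g., enabling the exporter together with a PodMonitor for the Prometheus
  ## Operator (illustrative; the "monitoring" namespace is hypothetical):
  ## metrics:
  ##   enabled: true
  ##   podMonitor:
  ##     enabled: true
  ##     namespace: monitoring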
  ## Custom PrometheusRule to be defined
  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
  ##
  prometheusRule:
    ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus
    ##
    enabled: false
    ## @param metrics.prometheusRule.namespace Namespace where the prometheusRules resource should be created
    ##
    namespace: ""
    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus
    ##
    additionalLabels: {}
    ## @param metrics.prometheusRule.rules Custom Prometheus [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
    ## These are just example rules (taken from the PostgreSQL chart), please adapt them to your needs.
    ## Make sure to constrain the rules to the current Spark service.
    ## rules:
    ##   - alert: HugeReplicationLag
    ##     expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
    ##     for: 1m
    ##     labels:
    ##       severity: critical
    ##     annotations:
    ##       description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
    ##       summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
    ##
    rules: []
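
## A minimal install combining a few of the options above (illustrative sketch;
## "my-spark" is a hypothetical release name and "bitnami/spark" a repo alias):
##   helm install my-spark bitnami/spark \
##     --set worker.replicaCount=3 \
##     --set metrics.enabled=true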