# An API key is required to connect to the Speedscale cloud.
# If you need a key, email support@speedscale.com.
apiKey: ""

# A secret name can be referenced instead of the API key itself.
# The secret must be of the format:
#
# type: Opaque
# data:
#   SPEEDSCALE_API_KEY: <key>
#   SPEEDSCALE_APP_URL: <appUrl>
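#
# For example, a complete manifest might look like this (the metadata name
# and namespace here are placeholders; the name must match the value set in
# apiKeySecret below, and values under data must be base64-encoded):
#
# apiVersion: v1
# kind: Secret
# metadata:
#   name: speedscale-apikey
#   namespace: speedscale
# type: Opaque
# data:
#   SPEEDSCALE_API_KEY: <base64-encoded key>
#   SPEEDSCALE_APP_URL: <base64-encoded appUrl>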
apiKeySecret: ""

# Speedscale domain to use.
appUrl: "app.speedscale.com"

# The name of your cluster.
clusterName: "my-cluster"

# Image settings for Speedscale components.
image:
  registry: gcr.io/speedscale
  tag: v1.3.541
  pullPolicy: Always

# Log level for Speedscale components.
logLevel: "info"

# Namespaces to be watched by the Speedscale Operator, as a list of names.
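#
# Example (namespace names are illustrative):
#
# namespaceSelector:
#   - default
#   - my-app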
namespaceSelector: []

# Instructs the operator to deploy resources necessary to interact with your cluster from the Speedscale dashboard.
dashboardAccess: true

# Filter rule to apply to the Speedscale Forwarder.
filterRule: "standard"

# Data Loss Prevention settings.
dlp:
  # Instructs the operator to enable data loss prevention features.
  enabled: false

  # Configuration for data loss prevention.
  config: "standard"

# If the operator pod/webhooks need to be on the host network.
# This is only needed if the control plane cannot connect directly to pods,
# e.g. when Calico is used as EKS's default networking:
# https://docs.tigera.io/calico/3.25/getting-started/kubernetes/managed-public-cloud/eks#install-eks-with-calico-networking
hostNetwork: false

# Annotations to be added to all Speedscale-related resources.
#
# Example:
# annotation.first: value
# annotation.second: value
globalAnnotations: {}

# A full affinity object as detailed: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity
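#
# Example (a minimal sketch pinning Speedscale pods to amd64 nodes via the
# well-known kubernetes.io/arch label; adjust to your scheduling needs):
#
# affinity:
#   nodeAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       nodeSelectorTerms:
#         - matchExpressions:
#             - key: kubernetes.io/arch
#               operator: In
#               values:
#                 - amd64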
affinity: {}

# The list of tolerations as detailed: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
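#
# Example (the taint key and value are illustrative):
#
# tolerations:
#   - key: dedicated
#     operator: Equal
#     value: speedscale
#     effect: NoSchedule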
tolerations: []

# A nodeSelector object as detailed: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/
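#
# Example (uses the well-known kubernetes.io/os node label):
#
# nodeSelector:
#   kubernetes.io/os: linux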
nodeSelector: {}

# Deploy a demo app at startup. Set this to an empty string to skip the demo.
deployDemo: "java"

# Operator settings. These limits are recommended unless you have a cluster
# with a very large number of workloads (e.g. 10k+ deployments, replicasets, etc.).
operator:
  resources:
    limits:
      cpu: 500m
      memory: 512Mi
    requests:
      cpu: 100m
      memory: 128Mi