---
replicas: 1

# Allows you to add any config files in /usr/share/logstash/config/
# such as logstash.yml and log4j2.properties
#
# Note that when overriding logstash.yml, `http.host: 0.0.0.0` should always be included
# to make default probes work.
logstashConfig: {}
#  logstash.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value

# Allows you to add any pipeline files in /usr/share/logstash/pipeline/
### ***warn*** there is a hardcoded logstash.conf in the image, override it first
logstashPipeline: {}
#  logstash.conf: |
#    input {
#      exec {
#        command => "uptime"
#        interval => 30
#      }
#    }
#    output { stdout { } }

# Allows you to add any pattern files in your custom pattern dir
logstashPatternDir: "/usr/share/logstash/patterns/"
logstashPattern: {}
#    pattern.conf: |
#      DPKG_VERSION [-+~<>\.0-9a-zA-Z]+

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# Add sensitive data to k8s secrets
secrets: []
#  - name: "env"
#    value:
#      ELASTICSEARCH_PASSWORD: "LS1CRUdJTiBgUFJJVkFURSB"
#      api_key: ui2CsdUadTiBasRJRkl9tvNnw
#  - name: "tls"
#    value:
#      ca.crt: |
#        LS0tLS1CRUdJT0K
#        LS0tLS1CRUdJT0K
#        LS0tLS1CRUdJT0K
#        LS0tLS1CRUdJT0K
#      cert.crt: "LS0tLS1CRUdJTiBlRJRklDQVRFLS0tLS0K"
#      cert.key.filepath: "secrets.crt" # The path to file should be relative to the `values.yaml` file.

# A list of secrets and their paths to mount inside the pod
secretMounts: []

hostAliases: []
# - ip: "127.0.0.1"
#   hostnames:
#     - "foo.local"
#     - "bar.local"

image: "docker.elastic.co/logstash/logstash"
imageTag: "8.5.1"
imagePullPolicy: "IfNotPresent"
imagePullSecrets: []

podAnnotations: {}

# additional labels
labels: {}

logstashJavaOpts: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "100m"
    memory: "1536Mi"
  limits:
    cpu: "1000m"
    memory: "1536Mi"

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi

rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
  annotations:
    {}
    # annotation1: "value1"
    # annotation2: "value2"
    # annotation3: "value3"

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: false
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim

persistence:
  enabled: false
  annotations: {}

extraVolumes:
  []
  # - name: extras
  #   emptyDir: {}

extraVolumeMounts:
  []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true

extraContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

extraInitContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
nodeAffinity: {}

# This is inter-pod affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
podAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

httpPort: 9600

# Custom ports to add to logstash
extraPorts:
  []
  # - name: beats
  #   containerPort: 5001

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for logstash to stop gracefully
terminationGracePeriod: 120

# Probes
# Default probes are using `httpGet` which requires that `http.host: 0.0.0.0` is part of
# `logstash.yml`. If needed probes can be disabled or overridden using the following syntaxes:
#
# disable livenessProbe
# livenessProbe: null
#
# replace httpGet default readinessProbe by some exec probe
# readinessProbe:
#   httpGet: null
#   exec:
#     command:
#       - curl
#       - localhost:9600

livenessProbe:
  httpGet:
    path: /
    port: http
  initialDelaySeconds: 300
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 1

readinessProbe:
  httpGet:
    path: /
    port: http
  initialDelaySeconds: 60
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 3

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

nodeSelector: {}
tolerations: []

nameOverride: ""
fullnameOverride: ""

lifecycle:
  {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]

service:
  {}
  # annotations: {}
  # type: ClusterIP
  # loadBalancerIP: ""
  # ports:
  #   - name: beats
  #     port: 5044
  #     protocol: TCP
  #     targetPort: 5044
  #   - name: http
  #     port: 8080
  #     protocol: TCP
  #     targetPort: 8080

ingress:
  enabled: false
  annotations:
    {}
    # kubernetes.io/tls-acme: "true"
  className: "nginx"
  pathtype: ImplementationSpecific
  hosts:
    - host: logstash-example.local
      paths:
        - path: /beats
          servicePort: 5044
        - path: /http
          servicePort: 8080
  tls: []
  # - secretName: logstash-example-tls
  #   hosts:
  #     - logstash-example.local