Merge pull request #536 from samuelattwood/sumologic

Adding SumoLogic
Samuel Attwood 2022-10-24 14:50:24 -04:00 committed by GitHub
commit 39e7fc3fc3
528 changed files with 111918 additions and 0 deletions

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,24 @@
dependencies:
- name: fluent-bit
repository: https://fluent.github.io/helm-charts
version: 0.20.2
- name: kube-prometheus-stack
repository: https://prometheus-community.github.io/helm-charts
version: 12.10.0
- name: falco
repository: https://falcosecurity.github.io/charts
version: 1.18.6
- name: metrics-server
repository: https://charts.bitnami.com/bitnami
version: 5.11.9
- name: telegraf-operator
repository: https://helm.influxdata.com/
version: 1.3.5
- name: tailing-sidecar-operator
repository: https://sumologic.github.io/tailing-sidecar
version: 0.3.4
- name: opentelemetry-operator
repository: https://open-telemetry.github.io/opentelemetry-helm-charts
version: 0.7.0
digest: sha256:da79b29a1e6b366c6947f3b9b0d5948badc60c51d55fb1dac5e76fc0ffcd4a44
generated: "2022-09-15T08:31:35.45416754Z"

@@ -0,0 +1,48 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Sumo Logic
catalog.cattle.io/kube-version: '>=1.18-0'
catalog.cattle.io/release-name: sumologic
apiVersion: v2
appVersion: 2.17.0
dependencies:
- condition: fluent-bit.enabled,sumologic.logs.enabled
name: fluent-bit
repository: file://./charts/fluent-bit
version: 0.20.2
- condition: kube-prometheus-stack.enabled,sumologic.metrics.enabled
name: kube-prometheus-stack
repository: file://./charts/kube-prometheus-stack
version: 12.10.0
- condition: falco.enabled
name: falco
repository: file://./charts/falco
version: 1.18.6
- condition: metrics-server.enabled
name: metrics-server
repository: file://./charts/metrics-server
version: 5.11.9
- condition: telegraf-operator.enabled
name: telegraf-operator
repository: file://./charts/telegraf-operator
version: 1.3.5
- condition: tailing-sidecar-operator.enabled
name: tailing-sidecar-operator
repository: file://./charts/tailing-sidecar-operator
version: 0.3.4
- condition: opentelemetry-operator.enabled
name: opentelemetry-operator
repository: file://./charts/opentelemetry-operator
version: 0.7.0
description: A Helm chart for collecting Kubernetes logs, metrics, traces and events
into Sumo Logic.
home: https://github.com/SumoLogic/sumologic-kubernetes-collection
icon: https://raw.githubusercontent.com/SumoLogic/sumologic-kubernetes-collection/main/images/sumo_logic_logo.png
keywords:
- monitoring
- logging
name: sumologic
sources:
- https://github.com/SumoLogic/sumologic-kubernetes-collection
type: application
version: 2.17.0
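
A note on the `condition` entries above: Helm evaluates the comma-separated value paths in order and uses the first valid boolean it finds to enable or disable the dependency. As a minimal sketch (flag name taken from the conditions above, value illustrative), the bundled metrics-server can be switched on from the parent chart's values:

```yaml
# metrics-server is gated by condition: metrics-server.enabled.
# Enabling it is required before fluentd autoscaling unless the cluster
# already runs its own metrics-server.
metrics-server:
  enabled: true
```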

@@ -0,0 +1,404 @@
# Configuration
To see all available configuration options for our sub-charts, please refer to their documentation.
- [Falco](https://github.com/falcosecurity/charts/tree/master/falco#configuration) - All Falco properties should be prefixed with `falco.` in our values.yaml to override a property not listed below.
- [Kube-Prometheus-Stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#configuration) - All Kube Prometheus Stack properties should be prefixed with `kube-prometheus-stack.` in our values.yaml to override a property not listed below.
- [Fluent Bit](https://github.com/fluent/helm-charts/blob/main/charts/fluent-bit/values.yaml) - All Fluent Bit properties should be prefixed with `fluent-bit.` in our values.yaml to override a property not listed below.
- [Metrics Server](https://github.com/bitnami/charts/tree/master/bitnami/metrics-server/#parameters) - All Metrics Server properties should be prefixed with `metrics-server.` in our values.yaml to override a property not listed below.
- [Tailing Sidecar Operator](https://github.com/SumoLogic/tailing-sidecar/tree/main/helm/tailing-sidecar-operator#configuration) -
All Tailing Sidecar Operator properties should be prefixed with `tailing-sidecar-operator.` in our values.yaml to
override a property not listed below.
- [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator#opentelemetry-operator-helm-chart) -
All OpenTelemetry Operator properties should be prefixed with `opentelemetry-operator.` in our values.yaml to
override a property not listed below.
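For example, a minimal `values.yaml` might combine several of these prefixes (the keys are taken from the table below; the values are illustrative, not recommendations):

```yaml
# Illustrative sub-chart overrides; each top-level key is a sub-chart prefix.
falco:
  falco:
    jsonOutput: true         # falco.falco.jsonOutput: output events in json
kube-prometheus-stack:
  prometheus:
    prometheusSpec:
      walCompression: true   # enable WAL compression in Prometheus
fluent-bit:
  service:
    flush: 5                 # seconds between buffer flushes to fluentd
```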
The following table lists the configurable parameters of the Sumo Logic chart and their default values.
| Parameter | Description | Default |
|---------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
| `nameOverride` | Used to override the chart name. | `Nil` |
| `fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `sumologic.setupEnabled` | If enabled, a pre-install hook will create Collector and Sources in Sumo Logic. | `true` |
| `sumologic.cleanupEnabled` | If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector. | `false` |
| `sumologic.events.enabled` | Defines whether collection of Kubernetes events is enabled. | `true` |
| `sumologic.events.provider` | Defines which provider is used for Kubernetes events collection. This can be either `fluentd` or `otelcol`. | `fluentd` |
| `sumologic.logs.enabled` | Set the enabled flag to false for disabling logs ingestion altogether. | `true` |
| `sumologic.metrics.enabled` | Set the enabled flag to false for disabling metrics ingestion altogether. | `true` |
| `sumologic.logs.fields` | Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/Manage/Fields#Manage_fields) | `{}` |
| `sumologic.logs.metadata.provider` | Set provider to use for logs forwarding and metadata enrichment. Can be either `otelcol` or `fluentd`. | `fluentd` |
| `sumologic.metrics.metadata.provider` | Set provider to use for metrics forwarding and metadata enrichment. Can be either `otelcol` or `fluentd`. | `fluentd` |
| `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `false` |
| `sumologic.metrics.remoteWriteProxy.config.clientBodyBufferSize` | See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write. | See [values.yaml] |
| `sumologic.metrics.remoteWriteProxy.config.workerCountAutotune` | This feature autodetects how much CPU is assigned to the nginx instance and sets the right amount of workers based on that. Disable to use the default of 8 workers. | `false` |
| `sumologic.metrics.remoteWriteProxy.replicaCount` | Number of replicas in the remote write proxy deployment. | See [values.yaml] |
| `sumologic.metrics.remoteWriteProxy.image` | Nginx docker image for the remote write proxy. | See [values.yaml] |
| `sumologic.metrics.remoteWriteProxy.resources` | Resource requests and limits for the remote write proxy container. | See [values.yaml] |
| `sumologic.metrics.remoteWriteProxy.livenessProbe` | Liveness probe settings for the remote write proxy container. | See [values.yaml] |
| `sumologic.metrics.remoteWriteProxy.readinessProbe` | Readiness probe settings for the remote write proxy container. | See [values.yaml] |
| `sumologic.metrics.remoteWriteProxy.securityContext` | The securityContext configuration for the remote write proxy. | `{}` |
| `sumologic.metrics.remoteWriteProxy.nodeSelector` | Node selector for the remote write proxy deployment. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `sumologic.metrics.remoteWriteProxy.tolerations` | Tolerations for the remote write proxy deployment. | `[]` |
| `sumologic.metrics.remoteWriteProxy.affinity` | Affinity for the remote write proxy deployment. | `{}` |
| `sumologic.metrics.remoteWriteProxy.priorityClassName` | Priority class name for the remote write proxy deployment. | `Nil` |
| `sumologic.metrics.remoteWriteProxy.podLabels` | Additional labels for the remote write proxy container. | `{}` |
| `sumologic.metrics.remoteWriteProxy.podAnnotations` | Additional annotations for the remote write proxy container. | `{}` |
| `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_ | `false` |
| `sumologic.envFromSecret` | If enabled, accessId and accessKey will be sourced from the Secret name given. Be sure to include at least the following env variables in your secret: (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY. See the example Secret sketch after this table. | `sumo-api-secret` |
| `sumologic.accessId` | Sumo access ID. | `Nil` |
| `sumologic.accessKey` | Sumo access key. | `Nil` |
| `sumologic.endpoint` | Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection. | `Nil` |
| `sumologic.collectionMonitoring` | | `false` |
| `sumologic.collectorName` | The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified. | `Nil` |
| `sumologic.clusterName` | An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes. | `kubernetes` |
| `sumologic.collector.sources` | Configuration of HTTP sources. [See docs/Terraform.md for more information](../../docs/Terraform.md). | See [values.yaml] |
| `sumologic.httpProxy` | HTTP proxy URL | `Nil` |
| `sumologic.httpsProxy` | HTTPS proxy URL | `Nil` |
| `sumologic.noProxy` | List of comma separated hostnames which should be excluded from the proxy | `kubernetes.default.svc` |
| `sumologic.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets. | `Nil` |
| `sumologic.podLabels` | Additional labels for the pods. | `{}` |
| `sumologic.podAnnotations` | Additional annotations for the pods. | `{}` |
| `sumologic.scc.create` | Create OpenShift's Security Context Constraint | `false` |
| `sumologic.serviceAccount.annotations` | Add custom annotations to sumologic serviceAccounts | `{}` |
| `sumologic.setup.job.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's setup job. | `Nil` |
| `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. | `{}` |
| `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. | `{}` |
| `sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container. | `sumologic/kubernetes-fluentd` |
| `sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `1.3.0` |
| `sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` |
| `sumologic.setup.job.nodeSelector` | Node selector for sumologic setup job. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `sumologic.setup.job.tolerations` | Add tolerations for the setup Job. | `[]` |
| `sumologic.setup.job.affinity` | Add affinity and anti-affinity for the setup Job. | `{}` |
| `sumologic.setup.monitors.enabled` | If enabled, a pre-install hook will create k8s monitors in Sumo Logic. | `true` |
| `sumologic.setup.monitors.monitorStatus` | The installed monitors default status: enabled/disabled. | `enabled` |
| `sumologic.setup.monitors.notificationEmails` | A list of emails to send notifications from monitors. | `[]` |
| `sumologic.setup.dashboards.enabled` | If enabled, a pre-install hook will install k8s dashboards in Sumo Logic. | `true` |
| `fluentd.image.repository` | Image repository for Sumo Logic docker container. | `sumologic/kubernetes-fluentd` |
| `fluentd.image.tag` | Image tag for Sumo Logic docker container. | `1.3.0` |
| `fluentd.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` |
| `fluentd.logLevelFilter` | Do not send fluentd logs if set to `true`. | `true` |
| `fluentd.additionalPlugins` | Additional Fluentd plugins to install from RubyGems. Please see our [documentation](../../docs/Additional_Fluentd_Plugins.md) for more information. | `[]` |
| `fluentd.compression.enabled` | Flag to control if data is sent to Sumo Logic compressed or not | `true` |
| `fluentd.compression.encoding` | Specifies which encoding should be used to compress data (either `gzip` or `deflate`) | `gzip` |
| `fluentd.logLevel` | Sets the fluentd log level. The default log level, if not specified, is info. Sumo will only ingest the error log level and some specific warnings, the info logs can be seen in kubectl logs. | `info` |
| `fluentd.verifySsl` | Verify SumoLogic HTTPS certificates. | `true` |
| `fluentd.proxyUri` | Proxy URI for sumologic output plugin. | `Nil` |
| `fluentd.securityContext` | The securityContext configuration for Fluentd | `{"fsGroup":999}` |
| `fluentd.podLabels` | Additional labels for all fluentd pods | `{}` |
| `fluentd.pvcLabels` | Additional labels for all fluentd PVCs | `{}` |
| `fluentd.podAnnotations` | Additional annotations for all fluentd pods | `{}` |
| `fluentd.podSecurityPolicy.create` | If true, create & use `podSecurityPolicy` for fluentd resources | `false` |
| `fluentd.persistence.enabled` | Persist data to a persistent volume; When enabled, fluentd uses the file buffer instead of memory buffer. After changing this value follow steps described in [Fluentd Persistence](../../docs/FluentdPersistence.md). | `true` |
| `fluentd.persistence.storageClass` | If defined, storageClassName: <storageClass>. If set to "-", storageClassName: "", which disables dynamic provisioning. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, Azure & OpenStack) | `Nil` |
| `fluentd.persistence.accessMode` | The accessMode for persistence. | `ReadWriteOnce` |
| `fluentd.persistence.size` | The size needed for persistence. | `10Gi` |
| `fluentd.buffer.type` | Option to specify the Fluentd buffer as file/memory. If `fluentd.persistence.enabled` is `true`, this will be ignored. | `memory` |
| `fluentd.buffer.flushInterval` | How frequently to push logs to Sumo Logic. | `5s` |
| `fluentd.buffer.numThreads` | Increase number of http threads to Sumo. May be required in heavy logging/high DPM clusters. | `8` |
| `fluentd.buffer.chunkLimitSize` | The max size of each chunk: events are written into a chunk until it reaches this size. | `1m` |
| `fluentd.buffer.queueChunkLimitSize` | Limit the number of queued chunks. | `128` |
| `fluentd.buffer.totalLimitSize` | The size limitation of this buffer plugin instance. | `128m` |
| `fluentd.buffer.filePaths` | File paths to buffer to, if Fluentd buffer type is specified as file above. Each sumologic output plugin buffers to its own unique file. | See [values.yaml] |
| `fluentd.buffer.extraConf` | Additional config for buffer settings | `Nil` |
| `fluentd.metadata.addOwners` | Option to control the enrichment of logs and metrics with pod owner metadata like `daemonset`, `deployment`, `replicaset`, `statefulset`. | `true` |
| `fluentd.metadata.addService` | Option to control the enrichment of logs and metrics with `service` metadata. | `true` |
| `fluentd.metadata.annotation_match` | Option to control capturing of annotations by metadata filter plugin. | `['sumologic\.com.*']` |
| `fluentd.metadata.apiGroups` | List of supported kubernetes API groups. | `['apps/v1']` |
| `fluentd.metadata.apiServerUrl` | Option to specify a custom API server URL instead of the default, which is taken from the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables. Example: `"https://kubernetes.default.svc:443"`. | `""` |
| `fluentd.metadata.coreApiVersions` | List of supported kubernetes API versions. | `['v1']` |
| `fluentd.metadata.cacheSize` | Option to control the metadata filter plugin's cache_size. | `10000` |
| `fluentd.metadata.cacheTtl` | Option to control the metadata filter plugin's cache_ttl (in seconds). | `7200` |
| `fluentd.metadata.cacheRefresh` | Option to control the interval at which metadata cache is asynchronously refreshed (in seconds). | `3600` |
| `fluentd.metadata.cacheRefreshVariation` | Option to control the variation in seconds by which the cacheRefresh option is changed for each pod separately. For example, if cache refresh is 1 hour and variation is 15 minutes, then actual cache refresh interval will be a random value between 45 minutes and 1 hour 15 minutes, different for each pod. Setting this to 0 disables cache refresh variation. | `900` |
| `fluentd.metadata.cacheRefreshApiserverRequestDelay` | Option to control the delay with which cache refresh calls hit the API server. For example, if 0, all metadata enrichment happens immediately. Setting this to a non-zero value ensures the traffic to the API server is more distributed. | `0` |
| `fluentd.metadata.cacheRefreshExcludePodRegex` | Option to add a regex for selectively disabling refresh of metadata in the fluentd cache. For example, if the regex is `(command-[a-z0-9]*)`, all pods whose names start with `command` will not have their metadata refreshed and will be cleaned up from the cache. | `''` |
| `fluentd.metadata.pluginLogLevel` | Option to give plugin specific log level. | `error` |
| `fluentd.logs.enabled` | Flag to control deploying the Fluentd logs statefulsets. | `true` |
| `fluentd.logs.podDisruptionBudget` | Pod Disruption Budget for the logs metadata enrichment statefulset. | `{"minAvailable": 2}` |
| `fluentd.logs.statefulset.nodeSelector` | Node selector for Fluentd log statefulset. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `fluentd.logs.statefulset.tolerations` | Tolerations for Fluentd log statefulset. | `[]` |
| `fluentd.logs.statefulset.affinity` | Affinity for Fluentd log statefulset. | `{}` |
| `fluentd.logs.statefulset.podAntiAffinity` | PodAntiAffinity for Fluentd log statefulset. | `soft` |
| `fluentd.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for Fluentd logs metadata enrichment statefulset. | `[]` |
| `fluentd.logs.statefulset.replicaCount` | Replica count for Fluentd log statefulset. | `3` |
| `fluentd.logs.statefulset.resources` | Resources for Fluentd log statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` |
| `fluentd.logs.statefulset.podLabels` | Additional labels for fluentd log pods. | `{}` |
| `fluentd.logs.statefulset.podAnnotations` | Additional annotations for fluentd log pods. | `{}` |
| `fluentd.logs.statefulset.priorityClassName` | Priority class name for fluentd log pods. | `Nil` |
| `fluentd.logs.statefulset.initContainers` | Define init containers that will be run for fluentd logs statefulset. | `[]` |
| `fluentd.logs.autoscaling.enabled` | Option to turn autoscaling on for fluentd and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` |
| `fluentd.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` |
| `fluentd.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` |
| `fluentd.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` |
| `fluentd.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` |
| `fluentd.logs.rawConfig` | Default log configuration. | `@include common.conf @include logs.conf` |
| `fluentd.logs.output.logFormat` | Format to post logs into Sumo: fields, json, json_merge, or text. | `fields` |
| `fluentd.logs.output.addTimestamp` | Option to control adding timestamp to logs. | `true` |
| `fluentd.logs.output.timestampKey` | Field name when add_timestamp is on. | `timestamp` |
| `fluentd.logs.output.pluginLogLevel` | Option to give plugin specific log level. | `error` |
| `fluentd.logs.output.extraConf` | Additional config parameters for sumologic output plugin | `Nil` |
| `fluentd.logs.extraLogs` | Additional config for custom log pipelines. | `Nil` |
| `fluentd.logs.containers.overrideRawConfig` | To override the entire contents of logs.source.containers.conf file. Leave empty for the default pipeline. | `Nil` |
| `fluentd.logs.containers.outputConf` | Default output configuration for container logs. | `@include logs.output.conf` |
| `fluentd.logs.containers.overrideOutputConf` | Override output section for container logs. Leave empty for the default output section. | `Nil` |
| `fluentd.logs.containers.sourceName` | Set the _sourceName metadata field in Sumo Logic. | `%{namespace}.%{pod}.%{container}` |
| `fluentd.logs.containers.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. | `%{namespace}/%{pod_name}` |
| `fluentd.logs.containers.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` |
| `fluentd.logs.containers.sourceCategoryReplaceDash` | Used to replace `-` with another character. | `/` |
| `fluentd.logs.containers.excludeContainerRegex` | A regular expression for containers. Matching containers will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.containers.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.containers.excludeNamespaceRegex` | A regular expression for namespaces. Matching namespaces will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.containers.excludePodRegex` | A regular expression for pods. Matching pods will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.containers.k8sMetadataFilter.watch` | Option to control the enabling of metadata filter plugin watch. | `true` |
| `fluentd.logs.containers.k8sMetadataFilter.caFile` | Path to CA file for Kubernetes server certificate validation. | `Nil` |
| `fluentd.logs.containers.k8sMetadataFilter.verifySsl` | Validate SSL certificates. | `true` |
| `fluentd.logs.containers.k8sMetadataFilter.clientCert` | Path to a client cert file to authenticate to the API server. | `Nil` |
| `fluentd.logs.containers.k8sMetadataFilter.clientKey` | Path to a client key file to authenticate to the API server. | `Nil` |
| `fluentd.logs.containers.k8sMetadataFilter.bearerTokenFile` | Path to a file containing the bearer token to use for authentication. | `Nil` |
| `fluentd.logs.containers.k8sMetadataFilter.tagToMetadataRegexp` | The regular expression used to extract kubernetes metadata (pod name, container name, namespace) from the current fluentd tag. | `.+?\.containers\.(?<pod_name>[^_]+)_(?<namespace>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$` |
| `fluentd.logs.containers.extraFilterPluginConf` | To use additional filter plugins. | `Nil` |
| `fluentd.logs.containers.extraOutputPluginConf` | To use additional output plugins. | `Nil` |
| `fluentd.logs.containers.perContainerAnnotationsEnabled` | Enable container-level pod annotations. See [fluent-plugin-kubernetes-sumologic documentation](https://github.com/SumoLogic/sumologic-kubernetes-fluentd/tree/v1.12.2-sumo-6/fluent-plugin-kubernetes-sumologic#container-level-pod-annotations_) for more details. | `false` |
| `fluentd.logs.input.forwardExtraConf` | Configuration for the forward input plugin that receives logs from FluentBit. | `` |
| `fluentd.logs.kubelet.enabled` | Collect kubelet logs. | `true` |
| `fluentd.logs.kubelet.extraFilterPluginConf` | To use additional filter plugins. | `Nil` |
| `fluentd.logs.kubelet.extraOutputPluginConf` | To use additional output plugins. | `Nil` |
| `fluentd.logs.kubelet.outputConf` | Output configuration for kubelet. | `@include logs.output.conf` |
| `fluentd.logs.kubelet.overrideOutputConf` | Override output section for kubelet logs. Leave empty for the default output section. | `Nil` |
| `fluentd.logs.kubelet.sourceName` | Set the _sourceName metadata field in Sumo Logic. | `k8s_kubelet` |
| `fluentd.logs.kubelet.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. | `kubelet` |
| `fluentd.logs.kubelet.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` |
| `fluentd.logs.kubelet.sourceCategoryReplaceDash` | Used to replace `-` with another character. | `/` |
| `fluentd.logs.kubelet.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.kubelet.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.kubelet.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.kubelet.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.systemd.enabled` | Collect systemd logs. | `true` |
| `fluentd.logs.systemd.extraFilterPluginConf` | To use additional filter plugins. | `Nil` |
| `fluentd.logs.systemd.extraOutputPluginConf` | To use additional output plugins. | `Nil` |
| `fluentd.logs.systemd.outputConf` | Output configuration for systemd. | `@include logs.output.conf` |
| `fluentd.logs.systemd.overrideOutputConf` | Override output section for systemd logs. Leave empty for the default output section. | `Nil` |
| `fluentd.logs.systemd.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. | `system` |
| `fluentd.logs.systemd.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` |
| `fluentd.logs.systemd.sourceCategoryReplaceDash` | Used to replace `-` with another character. | `/` |
| `fluentd.logs.systemd.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.systemd.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.systemd.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.systemd.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` |
| `fluentd.logs.default.extraFilterPluginConf` | To use additional filter plugins. | `Nil` |
| `fluentd.logs.default.extraOutputPluginConf` | To use additional output plugins. | `Nil` |
| `fluentd.logs.default.outputConf` | Default log configuration (catch-all). | `@include logs.output.conf` |
| `fluentd.logs.default.overrideOutputConf` | Override output section for untagged logs. Leave empty for the default output section. | `Nil` |
| `fluentd.metrics.enabled` | Flag to control deploying the Fluentd metrics statefulsets. | `true` |
| `fluentd.metrics.podDisruptionBudget` | Pod Disruption Budget for the metrics metadata enrichment statefulset. | `{"minAvailable": 2}` |
| `fluentd.metrics.statefulset.nodeSelector` | Node selector for Fluentd metrics statefulset. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `fluentd.metrics.statefulset.tolerations` | Tolerations for Fluentd metrics statefulset. | `[]` |
| `fluentd.metrics.statefulset.affinity` | Affinity for Fluentd metrics statefulset. | `{}` |
| `fluentd.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for Fluentd metrics statefulset. | `soft` |
| `fluentd.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for Fluentd metrics metadata enrichment statefulset. | `[]` |
| `fluentd.metrics.statefulset.replicaCount` | Replica count for Fluentd metrics statefulset. | `3` |
| `fluentd.metrics.statefulset.resources` | Resources for Fluentd metrics statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` |
| `fluentd.metrics.statefulset.podLabels` | Additional labels for fluentd metrics pods. | `{}` |
| `fluentd.metrics.statefulset.podAnnotations` | Additional annotations for fluentd metrics pods. | `{}` |
| `fluentd.metrics.statefulset.priorityClassName` | Priority class name for fluentd metrics pods. | `Nil` |
| `fluentd.metrics.statefulset.initContainers` | Define init containers that will be run for fluentd metrics statefulset. | `[]` |
| `fluentd.metrics.autoscaling.enabled` | Option to turn autoscaling on for fluentd and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` |
| `fluentd.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` |
| `fluentd.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` |
| `fluentd.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` |
| `fluentd.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` |
| `fluentd.metrics.rawConfig` | Raw config for fluentd metrics. | `@include common.conf @include metrics.conf` |
| `fluentd.metrics.outputConf` | Configuration for sumologic output plugin. | `@include metrics.output.conf` |
| `fluentd.metrics.extraEnvVars` | Additional environment variables for metrics metadata enrichment pods. | `Nil` |
| `fluentd.metrics.extraVolumes` | Additional volumes for metrics metadata enrichment pods. | `Nil` |
| `fluentd.metrics.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment pods. | `Nil` |
| `fluentd.metrics.extraOutputConf` | Additional config parameters for sumologic output plugin | `Nil` |
| `fluentd.metrics.extraFilterPluginConf` | To use additional filter plugins. | `Nil` |
| `fluentd.metrics.extraOutputPluginConf` | To use additional output plugins. | `Nil` |
| `fluentd.metrics.overrideOutputConf` | Override output section for metrics. Leave empty for the default output section. | `Nil` |
| `fluentd.monitoring` | Configuration of fluentd monitoring metrics. Adds the `fluentd_input_status_num_records_total` metric for input and the `fluentd_output_status_num_records_total` metric for output. | `{"input": false, "output": false}` |
| `fluentd.events.enabled` | If enabled, collect K8s events. | `true` |
| `fluentd.events.statefulset.nodeSelector` | Node selector for Fluentd events statefulset. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `fluentd.events.statefulset.affinity` | Affinity for Fluentd events statefulset. | `{}` |
| `fluentd.events.statefulset.tolerations` | Tolerations for Fluentd events statefulset. | `[]` |
| `fluentd.events.statefulset.resources` | Resources for Fluentd log statefulset. | `{"limits":{"cpu":"100m","memory":"256Mi"},"requests":{"cpu":"100m","memory":"256Mi"}}` |
| `fluentd.events.statefulset.podLabels` | Additional labels for fluentd events pods. | `{}` |
| `fluentd.events.statefulset.podAnnotations` | Additional annotations for fluentd events pods. | `{}` |
| `fluentd.events.statefulset.priorityClassName` | Priority class name for fluentd events pods. | `Nil` |
| `fluentd.events.statefulset.initContainers` | Define init containers that will be run for fluentd events statefulset. | `[]` |
| `fluentd.events.sourceName` | Source name for the Events source. Default: "events" | `Nil` |
| `fluentd.events.sourceCategory` | Source category for the Events source. Default: "{clusterName}/events" | `Nil` |
| `fluentd.events.overrideOutputConf` | Override output section for events. Leave empty for the default output section. | `Nil` |
| `metrics-server.enabled` | Set the enabled flag to true for enabling metrics-server. This is required before enabling fluentd autoscaling unless you have an existing metrics-server in the cluster. | `false` |
| `metrics-server.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `metrics-server.args` | Arguments for metric server. | `["--kubelet-insecure-tls","--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]` |
| `fluent-bit.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `fluent-bit.resources` | Resources for Fluent-bit daemonsets. | `{}` |
| `fluent-bit.enabled` | Flag to control deploying Fluent-bit Helm sub-chart. | `true` |
| `fluent-bit.config.service` | Configure Fluent-bit Helm sub-chart service. | See [values.yaml] |
| `fluent-bit.config.inputs` | Configure Fluent-bit Helm sub-chart inputs. Configuration for logs from different container runtimes is described in [Container log parsing](../../docs/ContainerLogs.md). | See [values.yaml] |
| `fluent-bit.config.outputs` | Configure Fluent-bit Helm sub-chart outputs. | See [values.yaml] |
| `fluent-bit.config.customParsers` | Configure Fluent-bit Helm sub-chart customParsers. | See [values.yaml] |
| `fluent-bit.service.labels` | Labels for fluent-bit service. | `{sumologic.com/scrape: "true"}` |
| `fluent-bit.podLabels` | Additional labels for fluent-bit pods. | `{}` |
| `fluent-bit.podAnnotations` | Additional annotations for fluent-bit pods. | `{}` |
| `fluent-bit.service.flush` | Frequency to flush fluent-bit buffer to fluentd. | `5` |
| `fluent-bit.metrics.enabled` | Enable metrics from fluent-bit. | `true` |
| `fluent-bit.env` | Environment variables for fluent-bit. | See [values.yaml] |
| `fluent-bit.backend.type` | Set the backend to which Fluent-Bit should flush the information it gathers. | `forward` |
| `fluent-bit.backend.forward.host` | Target host where Fluent-Bit or Fluentd are listening for Forward messages. | `${FLUENTD_LOGS_SVC}.${NAMESPACE}.svc.cluster.local.` |
| `fluent-bit.backend.forward.port` | TCP Port of the target service. | `24321` |
| `fluent-bit.backend.forward.tls` | Enable or disable TLS support. | `off` |
| `fluent-bit.backend.forward.tls_verify` | Force certificate validation. | `on` |
| `fluent-bit.backend.forward.tls_debug` | Set TLS debug verbosity level. It accepts values 0-4. | `1` |
| `fluent-bit.backend.forward.shared_key` | A key string known by the remote Fluentd used for authorization. | `Nil` |
| `fluent-bit.trackOffsets` | Specify whether to track the file offsets for tailing docker logs. This allows fluent-bit to pick up where it left off after pod restarts, but requires access to a hostPath. | `true` |
| `fluent-bit.tolerations` | Optional daemonset tolerations. | `[{"effect":"NoSchedule","operator":"Exists"}]` |
| `fluent-bit.input.systemd.enabled` | Enable systemd input. | `true` |
| `fluent-bit.parsers.enabled` | Enable custom parsers. | `true` |
| `fluent-bit.parsers.regex` | List of regex parsers. | `[{"name":"multi_line","regex":"(?\u003clog\u003e^{\"log\":\"\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}.*)"}]` |
| `fluent-bit.nodeSelector` | Node selector for fluent-bit. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `fluent-bit.priorityClassName` | Priority Class name for `fluent-bit` pods. | `Nil` |
| `kube-prometheus-stack.kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. helm template). Changing this may break Sumo Logic apps. | `1.13.0-0` |
| `kube-prometheus-stack.enabled` | Flag to control deploying Prometheus Operator Helm sub-chart. | `true` |
| `kube-prometheus-stack.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `kube-prometheus-stack.alertmanager.enabled` | Deploy alertmanager. | `false` |
| `kube-prometheus-stack.grafana.enabled` | If true, deploy the grafana sub-chart. | `false` |
| `kube-prometheus-stack.grafana.defaultDashboardsEnabled` | Deploy default dashboards. These are loaded using the sidecar. | `false` |
| `kube-prometheus-stack.prometheusOperator.podLabels` | Additional labels for prometheus operator pods. | `{}` |
| `kube-prometheus-stack.prometheusOperator.podAnnotations` | Additional annotations for prometheus operator pods. | `{}` |
| `kube-prometheus-stack.prometheusOperator.resources` | Resource limits for prometheus operator. Uses sub-chart defaults. | `{}` |
| `kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled` | Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. Validating webhook will check the rules syntax. | `false` |
| `kube-prometheus-stack.prometheusOperator.tls.enabled` | Enable TLS in prometheus operator. | `false` |
| `kube-prometheus-stack.kube-state-metrics.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `kube-prometheus-stack.kube-state-metrics.resources` | Resource limits for kube state metrics. Uses sub-chart defaults. | `{}` |
| `kube-prometheus-stack.kube-state-metrics.customLabels` | Custom labels to apply to service, deployment and pods. Uses sub-chart defaults. | `{}` |
| `kube-prometheus-stack.kube-state-metrics.podAnnotations` | Additional annotations for pods in the DaemonSet. Uses sub-chart defaults. | `{}` |
| `kube-prometheus-stack.prometheus.additionalServiceMonitors` | List of ServiceMonitor objects to create. | See [values.yaml] |
| `kube-prometheus-stack.prometheus.prometheusSpec.resources` | Resource limits for prometheus. Uses sub-chart defaults. | `{}` |
| `kube-prometheus-stack.prometheus.prometheusSpec.thanos.baseImage` | Base image for Thanos container. | `quay.io/thanos/thanos` |
| `kube-prometheus-stack.prometheus.prometheusSpec.thanos.version` | Image tag for Thanos container. | `v0.10.0` |
| `kube-prometheus-stack.prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | See [values.yaml] |
| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.labels` | Add custom pod labels to prometheus pods | `{}` |
| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.annotations` | Add custom pod annotations to prometheus pods | `{}` |
| `kube-prometheus-stack.prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. | See [values.yaml] |
| `kube-prometheus-stack.prometheus.prometheusSpec.walCompression` | Enables walCompression in Prometheus | `true` |
| `kube-prometheus-stack.prometheus-node-exporter.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `kube-prometheus-stack.prometheus-node-exporter.podLabels` | Additional labels for prometheus-node-exporter pods. | `{}` |
| `kube-prometheus-stack.prometheus-node-exporter.podAnnotations` | Additional annotations for prometheus-node-exporter pods. | `{}` |
| `kube-prometheus-stack.prometheus-node-exporter.resources` | Resource limits for node exporter. Uses sub-chart defaults. | `{}` |
| `kube-prometheus-stack.prometheus-node-exporter.nodeSelector` | Node selector for prometheus node exporter. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `kube-prometheus-stack.kube-state-metrics.nodeSelector` | Node selector for kube-state-metrics. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` |
| `falco.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift) | `true` |
| `falco.extraInitContainers` | InitContainers for Falco pod | See [values.yaml] |
| `falco.ebpf.enabled` | Enable eBPF support for Falco instead of falco-probe kernel module. Set to true for GKE. | `false` |
| `falco.falco.jsonOutput` | Output events in json. | `true` |
| `falco.pullSecrets` | Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` |
| `telegraf-operator.enabled` | Flag to control deploying Telegraf Operator Helm sub-chart. | `false` |
| `telegraf-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `telegraf-operator.replicaCount` | Replica count for Telegraf Operator pods. | `1` |
| `telegraf-operator.classes.secretName` | Secret name in which the Telegraf Operator configuration will be stored. | `telegraf-operator-classes` |
| `telegraf-operator.default` | Name of the default output configuration. | `sumologic-prometheus` |
| `telegraf-operator.data` | Telegraf sidecar configuration. | See [values.yaml] |
| `opentelemetry-operator.enabled` | Flag to control deploying OpenTelemetry Operator Helm sub-chart. | `false` |
| `opentelemetry-operator.createDefaultInstrumentation` | Flag to control creation of default Instrumentation object | `true` |
| `opentelemetry-operator.manager.env.WATCH_NAMESPACE` | Used to set value for `WATCH_NAMESPACE` environment variable which specifies Namespace to watch and create Instrumentation objects. | `Nil` |
| `otelagent.enabled` | Enables OpenTelemetry Collector Agent mode DaemonSet. | `false` |
| `otelagent.daemonset.nodeSelector` | Node selector for otelagent daemonset. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `otelagent.daemonset.priorityClassName` | Priority class name for OpenTelemetry Agent trace pods. | Defaults to `RELEASE-NAME-sumologic-priorityclass` if not provided. |
| `otelcol.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` |
| `otelcol.deployment.resources.limits.memory` | Sets the OpenTelemetry Collector memory limit. | `2Gi` |
| `otelcol.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` |
| `otelcol.deployment.nodeSelector` | Node selector for otelcol deployment. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `otelcol.metrics.enabled` | Enable or disable generation of the metrics from Collector. | `true` |
| `otelcol.config.service.pipelines.traces.receivers` | Sets the list of enabled receivers. | `{jaeger, opencensus, otlp, zipkin}` |
| `otelcol.config.exporters.zipkin.timeout` | Sets the Zipkin (default) exporter timeout. Append the unit, e.g. `s`, when setting the parameter. | `5s` |
| `otelcol.config.exporters.logging.loglevel` | When tracing debug logging exporter is enabled, sets the verbosity level. Use either `info` or `debug`. | `info` |
| `otelcol.config.service.pipelines.traces.exporters` | Sets the list of exporters enabled within OpenTelemetry Collector. Available values: `zipkin`, `logging`. Set to `{zipkin, logging}` to enable logging debugging exporter. | `{zipkin}` |
| `otelcol.config.service.pipelines.traces.processors` | Sets the list of enabled OpenTelemetry Collector processors. | `{memory_limiter, k8s_tagger, source, resource, batch, queued_retry}` |
| `otelcol.config.processors.memory_limiter.limit_mib` | Sets the OpenTelemetry Collector memory_limiter processor value (in MiB). Should be at least 100 MiB less than the value of `otelcol.deployment.resources.limits.memory`. | `1900` |
| `otelcol.config.processors.batch.send_batch_size` | Sets the preferred size of batch (in number of spans). | `256` |
| `otelcol.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch (in number of spans). Use with caution; setting too large a value might cause 413 Payload Too Large errors. | `512` |
| `otelcol.logLevelFilter` | Do not send otelcol logs if `true`. | `false` |
| `otelgateway.deployment.nodeSelector` | Node selector for otelgateway deployment. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `otellogs.daemonset.nodeSelector` | Node selector for otellogs daemonset. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `otellogs.daemonset.priorityClassName` | Priority class name for OpenTelemetry Agent log pods. | Defaults to `RELEASE-NAME-sumologic-priorityclass` if not provided. |
| `metadata.image.repository` | Image repository for otelcol docker container. | `public.ecr.aws/sumologic/sumologic-otel-collector` |
| `metadata.image.tag` | Image tag for otelcol docker container. | `0.0.18` |
| `metadata.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` |
| `metadata.securityContext` | The securityContext configuration for otelcol. | `{"fsGroup": 999}` |
| `metadata.podLabels` | Additional labels for all otelcol pods. | `{}` |
| `metadata.podAnnotations` | Additional annotations for all otelcol pods. | `{}` |
| `metadata.serviceLabels` | Additional labels for all otelcol services. | `{}` |
| `metadata.persistence.enabled` | Flag to control persistence for OpenTelemetry Collector. | `true` |
| `metadata.persistence.storageClass` | Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector. | `Nil` |
| `metadata.persistence.accessMode` | The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector. | `ReadWriteOnce` |
| `metadata.persistence.size` | Size of the volume which is used to provide persistence for OpenTelemetry Collector. | `10Gi` |
| `metadata.persistence.pvcLabels` | Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods. | `{}` |
| `metadata.metrics.enabled` | Flag to control deploying the otelcol metrics statefulsets. | `true` |
| `metadata.metrics.logLevel` | Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
| `metadata.metrics.config` | Configuration for metrics otelcol. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/Configuration.md. | See [values.yaml] |
| `metadata.metrics.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for metrics otelcol container. | `{ periodSeconds: 3, failureThreshold: 60}` |
| `metadata.metrics.statefulset.nodeSelector` | Node selector for metrics metadata enrichment (otelcol) statefulset. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `metadata.metrics.statefulset.tolerations` | Tolerations for metrics metadata enrichment (otelcol) statefulset. | `[]` |
| `metadata.metrics.statefulset.affinity` | Affinity for metrics metadata enrichment (otelcol) statefulset. | `{}` |
| `metadata.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset. | `soft` |
| `metadata.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset. | `[]` |
| `metadata.metrics.statefulset.replicaCount` | Replica count for metrics metadata enrichment (otelcol) statefulset. | `3` |
| `metadata.metrics.statefulset.resources` | Resources for metrics metadata enrichment (otelcol) statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` |
| `metadata.metrics.statefulset.priorityClassName` | Priority class name for metrics metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.metrics.statefulset.podLabels` | Additional labels for metrics metadata enrichment (otelcol) pods. | `{}` |
| `metadata.metrics.statefulset.podAnnotations` | Additional annotations for metrics metadata enrichment (otelcol) pods. | `{}` |
| `metadata.metrics.statefulset.containers.metadata.securityContext` | The securityContext configuration for otelcol container for metrics metadata enrichment statefulset. | `{}` |
| `metadata.metrics.statefulset.extraEnvVars` | Additional environment variables for metrics metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.metrics.statefulset.extraVolumes` | Additional volumes for metrics metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.metrics.statefulset.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.metrics.autoscaling.enabled` | Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` |
| `metadata.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` |
| `metadata.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` |
| `metadata.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` |
| `metadata.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` |
| `metadata.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` |
| `metadata.logs.enabled` | Flag to control deploying the otelcol logs statefulsets. | `true` |
| `metadata.logs.logLevel` | Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
| `metadata.logs.config` | Configuration for logs otelcol. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/Configuration.md. | See [values.yaml] |
| `metadata.logs.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for logs otelcol container. | `{ periodSeconds: 3, failureThreshold: 60}` |
| `metadata.logs.statefulset.nodeSelector` | Node selector for logs metadata enrichment (otelcol) statefulset. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
| `metadata.logs.statefulset.tolerations` | Tolerations for logs metadata enrichment (otelcol) statefulset. | `[]` |
| `metadata.logs.statefulset.affinity` | Affinity for logs metadata enrichment (otelcol) statefulset. | `{}` |
| `metadata.logs.statefulset.podAntiAffinity` | PodAntiAffinity for logs metadata enrichment (otelcol) statefulset. | `soft` |
| `metadata.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset. | `[]` |
| `metadata.logs.statefulset.replicaCount` | Replica count for logs metadata enrichment (otelcol) statefulset. | `3` |
| `metadata.logs.statefulset.resources` | Resources for logs metadata enrichment (otelcol) statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` |
| `metadata.logs.statefulset.priorityClassName` | Priority class name for logs metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.logs.statefulset.podLabels` | Additional labels for logs metadata enrichment (otelcol) pods. | `{}` |
| `metadata.logs.statefulset.podAnnotations` | Additional annotations for logs metadata enrichment (otelcol) pods. | `{}` |
| `metadata.logs.statefulset.containers.metadata.securityContext` | The securityContext configuration for otelcol container for logs metadata enrichment statefulset. | `{}` |
| `metadata.logs.statefulset.extraEnvVars` | Additional environment variables for logs metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.logs.statefulset.extraVolumes` | Additional volumes for logs metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.logs.statefulset.extraVolumeMounts` | Additional volume mounts for logs metadata enrichment (otelcol) pods. | `Nil` |
| `metadata.logs.autoscaling.enabled` | Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` |
| `metadata.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` |
| `metadata.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` |
| `metadata.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` |
| `metadata.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` |
| `metadata.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` |
| `otelevents.image.repository` | Image repository for otelcol docker container. | `public.ecr.aws/sumologic/sumologic-otel-collector` |
| `otelevents.image.tag` | Image tag for otelcol docker container. | `0.54.0-sumo-0` |
| `otelevents.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` |
| `otelevents.logLevel` | Log level for the OpenTelemetry Collector. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` |
| `otelevents.persistence.enabled` | Enable persistence for OpenTelemetry Collector. | `true` |
| `otelevents.persistence.storageClass` | Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector. | `Nil` |
| `otelevents.persistence.accessMode` | The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector. | `ReadWriteOnce` |
| `otelevents.persistence.size` | Size of the volume which is used to provide persistence for OpenTelemetry Collector. | `10Gi` |
| `otelevents.persistence.pvcLabels` | Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods. | `{}` |
| `otelevents.config.override` | Override configuration for OpenTelemetry Collector. See [the documentation](../../docs/opentelemetry_collector.md#customizing-opentelemetry-collector-configuration) for more details. | `{}` |
| `otelevents.statefulset` | OpenTelemetry Collector StatefulSet customization options. See values.yaml for more details. | See [values.yaml] |
| `tailing-sidecar-operator.enabled` | Flag to control deploying Tailing Sidecar Operator Helm sub-chart. | `false` |
| `tailing-sidecar-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `tailing-sidecar-operator.scc.create` | Create OpenShift's Security Context Constraint | `false` |
| `prometheus.prometheusSpec.nodeSelector` | Node selector for prometheus. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `{}` |
[values.yaml]: values.yaml
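
For the `sumologic.envFromSecret` option above, the Secret can be created ahead of time instead of passing `sumologic.accessId` and `sumologic.accessKey` in plain text. The sketch below assumes the default Secret name `sumo-api-secret` and uses placeholder credentials:

```yaml
# Sketch of the Secret consumed via sumologic.envFromSecret.
# The credential values are placeholders; substitute your own.
apiVersion: v1
kind: Secret
metadata:
  name: sumo-api-secret
type: Opaque
stringData:
  SUMOLOGIC_ACCESSID: <your-access-id>      # required environment variable
  SUMOLOGIC_ACCESSKEY: <your-access-key>    # required environment variable
```

With this Secret in the release namespace, the chart sources both environment variables from it.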

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

@@ -0,0 +1,676 @@
# Change Log
This file documents all notable changes to the Falco Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
## v1.18.6
* Bump falcosidekick chart dependency (fix issue with the UI)
## v1.18.5
* Bump falcosidekick chart dependency
## v1.18.4
* The URL to falcosidekick in NOTES.txt of the falco helm chart now points to the right place.
## v1.18.3
* Fix for [issue 318](https://github.com/falcosecurity/charts/issues/318) - Missing comma in k8s_audit_rules.yaml.
## v1.18.2
* Further fix for `--reuse-values` option after the introduction of `crio.enabled`.
## v1.18.1
* Workaround to make this chart work with Helm `--reuse-values` option after the introduction of `crio.enabled`.
## v1.18.0
* Added support for cri-o
## v1.17.6
* Remove whitespace around `falco.httpOutput.url` to fix the error `libcurl error: URL using bad/illegal format or missing URL`.
## v1.17.5
* Changed `falco.httpOutput.url` so that it always overrides the default URL, even when falcosidekick is enabled. (NOTE: don't use this version, see v1.17.6)
## v1.17.4
* Upgrade to Falco 0.31.1 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.31.1/CHANGELOG.md))
* Update rulesets from Falco 0.31.1
## v1.17.3
* Fix quoting around `--k8s-node`
## v1.17.2
* Add `leastPrivileged.enabled` configuration
## v1.17.1
* Fixed `priority` level `info` change to `informational`
## v1.17.0
* Upgrade to Falco 0.31.0 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.31.0/CHANGELOG.md))
* Update rulesets from Falco 0.31.0
* Update several configuration options under the `falco` node to reflect the new Falco version
* Initial plugins support
## v1.16.4
* Bump falcosidekick chart dependency
## v1.16.2
* Add `serviceAccount.annotations` configuration
## v1.16.1
* Fixed string escaping for `--k8s-node`
## v1.16.0
* Upgrade to Falco 0.30.0 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.30.0/CHANGELOG.md))
* Update rulesets from Falco 0.30.0
* Add `kubernetesSupport.enableNodeFilter` configuration to enable node filtering when requesting pods metadata from Kubernetes
* Add `falco.metadataDownload` configuration for fine-tuning container orchestrator metadata fetching params
* Add `falco.jsonIncludeTagsProperty` configuration to include tags in the JSON output
## v1.15.7
* Removed `maxSurge` reference from comment in Falco's `values.yaml` file.
## v1.15.6
* Update `Falcosidekick` chart to 0.3.13
## v1.15.4
* Update `Falcosidekick` chart to 0.3.12
## v1.15.3
* Upgrade to Falco 0.29.1 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.29.1/CHANGELOG.md))
* Update rulesets from Falco 0.29.1
## v1.15.2
* Add ability to use an existing secret containing the key, cert, and CA, as well as a PEM bundle, instead of creating it from files
## v1.15.1
* Fixed liveness and readiness probes schema when ssl is enabled
## v1.14.1
* Update `Falcosidekick` chart to 0.3.8
## v1.14.1
* Update image tag to 0.29.0 in values.yaml
## v1.14.0
* Upgrade to Falco 0.29.0 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.29.0/CHANGELOG.md))
* Update rulesets from Falco 0.29.0
## v1.13.2
* Fixed incorrect spelling of `fullfqdn`
## v1.13.1
* Fix port for readinessProbe and livenessProbe
## v1.13.0
* Add liveness and readiness probes to Falco
## v1.12.0
* Add `kubernetesSupport` configuration to make Kubernetes Falco support optional in the daemonset (enabled by default)
## v1.11.1
* Upgrade to Falco 0.28.1 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.28.1/CHANGELOG.md))
## v1.11.0
* Bump up version of chart for `Falcosidekick` dependency to `v3.5.0`
## v1.10.0
* Add `falcosidekick.fullfqdn` option to connect `falco` to `falcosidekick` with full FQDN
* Bump up version of chart for `Falcosidekick` dependency
## v1.9.0
* Upgrade to Falco 0.28.0 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.28.0/CHANGELOG.md))
* Update rulesets from Falco 0.28.0
## v1.8.1
* Bump up version of chart for `Falcosidekick` dependency
## v1.8.0
* Bump up version of chart for `Falcosidekick` dependency
## v1.7.10
* Update rule `Write below monitored dir` description
## v1.7.9
* Add a documentation section about the driver
## v1.7.8
* Increase CPU limit default value
## v1.7.7
* Add a documentation section about using init containers
## v1.7.6
* Correct icon URL
## v1.7.5
* Update downstream sidekick chart
## v1.7.4
* Add `ebpf.probe.path` configuration option
## v1.7.3
* Bump up version of chart for `Falcosidekick` dependency
## v1.7.2
* Fix `falco` configmap when `Falcosidekick` is enabled, wrong service name was used
## v1.7.1
* Correct image tag for Falco 0.27.0
## v1.7.0
* Upgrade to Falco 0.27.0 (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.27.0/CHANGELOG.md))
* Add `falco.output_timeout` configuration setting
## v1.6.1
### Minor Changes
* Add `falcosidekick` as an optional dependency
## v1.6.0
### Minor Changes
* Remove deprecated integrations (see [#123](https://github.com/falcosecurity/charts/issues/123))
## v1.5.8
### Minor Changes
* Add value `extraVolumes`, allow adding extra volumes to falco daemonset
* Add value `extraVolumeMounts`, allow adding extra volumeMounts to falco container in falco daemonset
## v1.5.6
### Minor Changes
* Add `falco.webserver.sslEnabled` config, enabling SSL support
* Add `falco.webserver.nodePort` configuration as an alternative way for exposing the AuditLog webhook (disabled by default)
## v1.5.5
### Minor Changes
* Support release namespace configuration
## v1.5.4
### Minor Changes
* Upgrade to Falco 0.26.2, `DRIVERS_REPO` now defaults to https://download.falco.org/driver (see the [Falco changelog](https://github.com/falcosecurity/falco/blob/0.26.2/CHANGELOG.md))
## v1.5.3
### Minor Changes
* Deprecation notice for gcscc, natsOutput, snsOutput, pubsubOutput integrations
* Clean up old references from documentation
## v1.5.2
### Minor Changes
* Add Pod Security Policy Support for the fake event generator
## v1.5.1
### Minor Changes
* Replace extensions apiGroup/apiVersion because of deprecation
## v1.5.0
### Minor Changes
* Upgrade to Falco 0.26.1
* Update ruleset from Falco 0.26.1
* Automatically set the appropriate apiVersion for rbac
## v1.4.0
### Minor Changes
* Allow adding InitContainers to Falco pod with `extraInitContainers` configuration
## v1.3.0
### Minor Changes
* Upgrade to Falco 0.25.0
* Update ruleset from Falco 0.25.0
## v1.2.3
### Minor Changes
* Fix duplicate mount point problem when both gRPC and NATS integrations are enabled
## v1.2.2
### Minor Changes
* Allow configuration using values for `imagePullSecrets` setting
* Add `docker.io/falcosecurity/falco` image to `falco_privileged_images` macro
## v1.2.1
### Minor Changes
* Add SecurityContextConstraint to allow deploying in Openshift
## v1.2.0
### Minor Changes
* Upgrade to Falco 0.24.0
* Update ruleset from Falco 0.24.0
* gRPC Unix Socket support
* Set default threadiness to 0 ("auto" behavior) for the gRPC server
## v1.1.10
### Minor Changes
* Switch to `falcosecurity/event-generator`
* Allow configuration using values for `fakeEventGenerator.args` setting
* Update ruleset
* New releasing mechanism
## v1.1.9
### Minor Changes
* Add missing privileges for the apps Kubernetes API group
* Allow client config url for Audit Sink with `auditLog.dynamicBackend.url`
## v1.1.8
### Minor Changes
* Upgrade to Falco 0.23.0
* Correct socket path for `--cri` flag
* Always mount `/etc` (required by `falco-driver-loader`)
## v1.1.7
### Minor Changes
* Add pod annotation support for daemonset
## v1.1.6
### Minor Changes
* Upgrade to Falco 0.21.0
* Upgrade rules to Falco 0.21.0
## v1.1.5
### Minor Changes
* Add headless service for gRPC server
* Allow gRPC certificates configuration by using `--set-file`
## v1.1.4
### Minor Changes
* Make `/lib/modules` writable from the container
## v1.1.3
### Minor Changes
* Allow configuration using values for `grpc` setting
* Allow configuration using values for `grpc_output` setting
## v1.1.2
### Minor Changes
* Upgrade to Falco 0.20.0
* Upgrade rules to Falco 0.20.0
## v1.1.1
### Minor Changes
* Upgrade to Falco 0.19.0
* Upgrade rules to Falco 0.19.0
* Remove Sysdig references, Falco is a project by its own name
## v1.1.0
### Minor Changes
* Revamp auditLog feature
* Upgrade to latest version (0.18.0)
* Replace CRI references with containerD
## v1.0.12
### Minor Changes
* Support multiple lines for `falco.programOutput.program`
## v1.0.11
### Minor Changes
* Add affinity
## v1.0.10
### Minor Changes
* Migrate API versions from deprecated, removed versions to support Kubernetes v1.16
## v1.0.9
### Minor Changes
* Restrict the access to `/dev` on underlying host to read only
## v1.0.8
### Minor Changes
* Upgrade to Falco 0.17.1
* Upgrade rules to Falco 0.17.1
## v1.0.7
### Minor Changes
* Allow configuration using values for `nodeSelector` setting
## v1.0.6
### Minor Changes
* Falco does a rollingUpgrade when the falco or falco-rules configMap changes
with a helm upgrade
## v1.0.5
### Minor Changes
* Add 3 resources (`daemonsets`, `deployments`, `replicasets`) to the ClusterRole resource list
Ref: [PR#514](https://github.com/falcosecurity/falco/pull/514) from Falco repository
## v1.0.4
### Minor Changes
* Upgrade to Falco 0.17.0
* Upgrade rules to Falco 0.17.0
## v1.0.3
### Minor Changes
* Support [`priorityClassName`](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/)
## v1.0.2
### Minor Changes
* Upgrade to Falco 0.16.0
* Upgrade rules to Falco 0.16.0
## v1.0.1
### Minor Changes
* Extra environment variables passed to daemonset pods
## v1.0.0
### Major Changes
* Add support for K8s audit logging
## v0.9.1
### Minor Changes
* Allow configuration using values for `time_format_iso8601` setting
* Allow configuration using values for `syscall_event_drops` setting
* Allow configuration using values for `http_output` setting
* Add CHANGELOG entry for v0.8.0, [not present on its PR](https://github.com/helm/charts/pull/14813#issuecomment-506821432)
## v0.9.0
### Major Changes
* Add nestorsalceda as an approver
## v0.8.0
### Major Changes
* Allow configuration of Pod Security Policy. This is needed to get Falco
running when the Admission Controller is enabled.
## v0.7.10
### Minor Changes
* Fix bug with Google Cloud Security Command Center and Falco integration
## v0.7.9
### Minor Changes
* Upgrade to Falco 0.15.3
* Upgrade rules to Falco 0.15.3
## v0.7.8
### Minor Changes
* Add TZ parameter for time correlation in Falco logs
## v0.7.7
### Minor Changes
* Upgrade to Falco 0.15.1
* Upgrade rules to Falco 0.15.1
## v0.7.6
### Major Changes
* Allow enabling/disabling usage of the Docker socket
* Configurable Docker socket path
* CRI support, configurable CRI socket
* Allow enabling/disabling usage of the CRI socket
## v0.7.5
### Minor Changes
* Upgrade to Falco 0.15.0
* Upgrade rules to Falco 0.15.0
## v0.7.4
### Minor Changes
* Use the KUBERNETES_SERVICE_HOST environment variable to connect to Kubernetes
API instead of using a fixed name
## v0.7.3
### Minor Changes
* Remove the toJson pipeline when storing Google Credentials. It mangles
double quotes and prevents using base64 encoded credentials
## v0.7.2
### Minor Changes
* Fix typos in README.md
## v0.7.1
### Minor Changes
* Add Google Pub/Sub Output integration
## v0.7.0
### Major Changes
* Disable eBPF by default on Falco. We activated eBPF by default to make the
CI pass, but we have since found a better method to make the CI pass without
bothering our users.
## v0.6.0
### Major Changes
* Upgrade to Falco 0.14.0
* Upgrade rules to Falco 0.14.0
* Enable eBPF by default on Falco
* Allow downloading Falco images from registries other than `docker.io`
* Use rollingUpdate strategy by default
* Provide sane defaults for falco resource management
## v0.5.6
### Minor Changes
* Allow extra container args
## v0.5.5
### Minor Changes
* Update correct slack example
## v0.5.4
### Minor Changes
* Using Falco version 0.13.0 instead of latest.
## v0.5.3
### Minor Changes
* Update falco_rules.yaml file to use the same rules as Falco 0.13.0
## v0.5.2
### Minor Changes
* Falco was accepted as a CNCF project. Fix references and download image from
falcosecurity organization.
## v0.5.1
### Minor Changes
* Allow falco to resolve cluster hostnames when running with ebpf.hostNetwork: true
## v0.5.0
### Major Changes
* Add Amazon SNS Output integration
## v0.4.0
### Major Changes
* Allow Falco to be run with a HTTP proxy server
## v0.3.1
### Minor Changes
* Mount an in-memory volume for shm. It was declared in volumes but was not mounted.
## v0.3.0
### Major Changes
* Add eBPF support for Falco. Falco can now read events via an eBPF program
loaded into the kernel instead of the `falco-probe` kernel module.
## v0.2.1
### Minor Changes
* Update falco_rules.yaml file to use the same rules as Falco 0.11.1
## v0.2.0
### Major Changes
* Add NATS Output integration
### Minor Changes
* Fix value mismatch between code and documentation
## v0.1.1
### Minor Changes
* Fix several typos
## v0.1.0
### Major Changes
* Initial release of Sysdig Falco Helm Chart

View File

@ -0,0 +1,6 @@
dependencies:
- name: falcosidekick
repository: https://falcosecurity.github.io/charts
version: 0.5.2
digest: sha256:766497dd14f272cdeedc3f67509d4ad7613190ff257edf46e572a957d88459f1
generated: "2022-06-07T09:41:13.10138558Z"

View File

@ -0,0 +1,24 @@
apiVersion: v2
appVersion: 0.31.1
dependencies:
- condition: falcosidekick.enabled
name: falcosidekick
repository: https://falcosecurity.github.io/charts
version: 0.5.2
description: Falco
home: https://falco.org
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/falco/horizontal/color/falco-horizontal-color.svg
keywords:
- monitoring
- security
- alerting
- metric
- troubleshooting
- run-time
maintainers:
- email: cncf-falco-dev@lists.cncf.io
name: The Falco Authors
name: falco
sources:
- https://github.com/falcosecurity/falco
version: 1.18.6

View File

@ -0,0 +1,6 @@
approvers:
- bencer
- nestorsalceda
reviewers:
- bencer
- nestorsalceda

View File

@ -0,0 +1,489 @@
# Falco
[Falco](https://falco.org) is a *Cloud Native Runtime Security* tool designed to detect anomalous activity in your applications. You can use Falco to monitor runtime security of your Kubernetes applications and internal components.
## Introduction
This chart adds Falco to all nodes in your cluster using a DaemonSet.
It also provides a Deployment for generating Falco alerts. This is useful for testing purposes.
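For example, assuming the `fakeEventGenerator.*` values documented in the configuration table below, a test install that also deploys the alert generator might look like this (after adding the `falcosecurity` repository as shown in the next section):

```bash
# Sketch: install Falco together with the sample event generator
# (the fakeEventGenerator.* values are listed in the configuration table below).
helm install falco \
  --set fakeEventGenerator.enabled=true \
  --set fakeEventGenerator.replicas=1 \
  falcosecurity/falco
```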
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
To install the chart with the release name `falco` run:
```bash
helm install falco falcosecurity/falco
```
After a few seconds, Falco should be running.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment
### About the driver
Falco needs a driver (the [kernel module](https://falco.org/docs/event-sources/drivers/#kernel-module) or the [eBPF probe](https://falco.org/docs/event-sources/drivers/#ebpf-probe)) to work.
The container image includes a script (`falco-driver-loader`) that either tries to build the driver on-the-fly or downloads a prebuilt driver as a fallback. Usually, no action is required.
If a prebuilt driver is not available for your distribution/kernel, Falco needs **kernel headers** installed on the host as a prerequisite to building the driver on the fly correctly. You can find instructions on installing the kernel headers for your system under the [Install section](https://falco.org/docs/getting-started/installation/) of the official documentation.
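As an illustration only, installing the headers on a Debian/Ubuntu node might look like the following; package names differ on other distributions:

```bash
# Assumption: Debian/Ubuntu node. Other distributions use different
# packages (e.g. kernel-devel on RHEL/CentOS).
sudo apt-get update
sudo apt-get install -y "linux-headers-$(uname -r)"
```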
## Uninstalling the Chart
To uninstall the `falco` deployment:
```bash
helm uninstall falco
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Falco chart and their default values.
| Parameter | Description | Default |
|--------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|
| `image.registry` | The image registry to pull from | `docker.io` |
| `image.repository` | The image repository to pull from | `falcosecurity/falco` |
| `image.tag` | The image tag to pull | `0.31.1` |
| `image.pullPolicy` | The image pull policy | `IfNotPresent` |
| `image.pullSecrets` | The image pull secrets | `[]` |
| `containerd.enabled` | Enable ContainerD support | `true` |
| `containerd.socket` | The path of the ContainerD socket | `/run/containerd/containerd.sock` |
| `crio.enabled` | Enable CRI-O support | `true` |
| `crio.socket` | The path of the CRI-O socket | `/run/crio/crio.sock` |
| `docker.enabled` | Enable Docker support | `true` |
| `docker.socket` | The path of the Docker daemon socket | `/var/run/docker.sock` |
| `kubernetesSupport.enabled` | Enable Kubernetes metadata collection via a connection to the Kubernetes API server | `true` |
| `kubernetesSupport.apiAuth` | Provide the authentication method Falco should use to connect to the Kubernetes API | `/var/run/secrets/kubernetes.io/serviceaccount/token` |
| `kubernetesSupport.apiUrl` | Provide the URL Falco should use to connect to the Kubernetes API | `https://$(KUBERNETES_SERVICE_HOST)` |
| `kubernetesSupport.enableNodeFilter` | If true, only the current node (on which Falco is running) will be considered when requesting metadata of pods | `true` |
| `podLabels` | Customized pod labels | `{}` |
| `resources.requests.cpu` | CPU requested for running on a node | `100m` |
| `resources.requests.memory` | Memory requested for running on a node | `512Mi` |
| `resources.limits.cpu` | CPU limit | `1000m` |
| `resources.limits.memory` | Memory limit | `1024Mi` |
| `extraArgs` | Specify additional container args | `[]` |
| `rbac.create` | If true, create & use RBAC resources | `true` |
| `serviceAccount.create` | Create serviceAccount | `true` |
| `serviceAccount.name` | Use this value as serviceAccountName | ` ` |
| `fakeEventGenerator.enabled` | Run [falcosecurity/event-generator](https://github.com/falcosecurity/event-generator) for sample events | `false` |
| `fakeEventGenerator.args` | Arguments for `falcosecurity/event-generator` | `run --loop ^syscall` |
| `fakeEventGenerator.replicas` | How many replicas of `falcosecurity/event-generator` to run | `1` |
| `daemonset.updateStrategy.type` | The updateStrategy for updating the daemonset | `RollingUpdate` |
| `daemonset.env` | Extra environment variables passed to daemonset pods | `{}` |
| `daemonset.podAnnotations` | Extra pod annotations to be added to pods created by the daemonset | `{}` |
| `podSecurityPolicy.create` | If true, create & use podSecurityPolicy | `false` |
| `proxy.httpProxy` | Set the proxy server to use if behind a firewall | ` ` |
| `proxy.httpsProxy` | Set the proxy server to use if behind a firewall | ` ` |
| `proxy.noProxy` | Set the proxy server to use if behind a firewall | ` ` |
| `timezone` | Set the daemonset's timezone | ` ` |
| `priorityClassName` | Set the daemonset's priorityClassName | ` ` |
| `ebpf.enabled` | Enable eBPF support for Falco instead of `falco-probe` kernel module | `false` |
| `ebpf.path` | Path of the eBPF probe | ` ` |
| `ebpf.settings.hostNetwork` | Needed to enable eBPF JIT at runtime for performance reasons | `true` |
| `leastPrivileged.enabled` | Use capabilities instead of running a privileged container. The kernel module driver cannot be loaded if enabled. | `false` |
| `auditLog.enabled` | Enable K8s audit log support for Falco | `false` |
| `auditLog.dynamicBackend.enabled` | Deploy the Audit Sink where Falco listens for K8s audit log events | `false` |
| `auditLog.dynamicBackend.url` | Define if Audit Sink client config should point to a fixed [url](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#url) (useful for development) instead of the default webserver service. | `` |
| `falco.rulesFile` | The location of the rules files | `[/etc/falco/falco_rules.yaml, /etc/falco/falco_rules.local.yaml, /etc/falco/k8s_audit_rules.yaml, /etc/falco/rules.d]` |
| `falco.timeFormatISO8601` | Display times using ISO 8601 instead of local time zone | `false` |
| `falco.jsonOutput` | Output events in json or text | `false` |
| `falco.jsonIncludeOutputProperty` | Include output property in json output | `true` |
| `falco.jsonIncludeTagsProperty` | Include tags property in json output | `true` |
| `falco.logStderr` | Send Falco debugging information logs to stderr | `true` |
| `falco.logSyslog` | Send Falco debugging information logs to syslog | `true` |
| `falco.logLevel` | The minimum level of Falco debugging information to include in logs | `info` |
| `falco.priority` | The minimum rule priority level to load and run | `debug` |
| `falco.bufferedOutputs` | Use buffered outputs to channels | `false` |
| `falco.syscallEventDrops.actions` | Actions to be taken when system calls were dropped from the circular buffer | `[log, alert]` |
| `falco.syscallEventDrops.rate` | Rate at which log/alert messages are emitted | `.03333` |
| `falco.syscallEventDrops.maxBurst` | Max burst of messages emitted | `10` |
| `falco.outputs.output_timeout` | Duration in milliseconds to wait before considering the output timeout deadline exceeded | `2000` |
| `falco.outputs.rate` | Number of tokens gained per second | `1` |
| `falco.outputs.maxBurst` | Maximum number of tokens outstanding | `1000` |
| `falco.syslogOutput.enabled` | Enable syslog output for security notifications | `true` |
| `falco.fileOutput.enabled` | Enable file output for security notifications | `false` |
| `falco.fileOutput.keepAlive` | Open file once or every time a new notification arrives | `false` |
| `falco.fileOutput.filename` | The filename for logging notifications | `./events.txt` |
| `falco.stdoutOutput.enabled` | Enable stdout output for security notifications | `true` |
| `falco.webserver.enabled` | Enable Falco embedded webserver to accept K8s audit events | `true` |
| `falco.webserver.k8sAuditEndpoint` | Endpoint where Falco embedded webserver accepts K8s audit events | `/k8s-audit` |
| `falco.webserver.k8sHealthzEndpoint` | Endpoint where Falco exposes the health status | `/healthz` |
| `falco.webserver.listenPort` | Port where the Falco embedded webserver listens for connections | `8765` |
| `falco.webserver.nodePort` | Exposes the Falco embedded webserver through a NodePort | `false` |
| `falco.webserver.sslEnabled` | Enable SSL on Falco embedded webserver | `false` |
| `falco.webserver.sslCertificate` | Certificate bundle path for the Falco embedded webserver | `/etc/falco/certs/server.pem` |
| `falco.livenessProbe.initialDelaySeconds` | Tells the kubelet that it should wait X seconds before performing the first probe | `60` |
| `falco.livenessProbe.timeoutSeconds` | Number of seconds after which the probe times out | `5` |
| `falco.livenessProbe.periodSeconds` | Specifies that the kubelet should perform the check every x seconds | `15` |
| `falco.readinessProbe.initialDelaySeconds` | Tells the kubelet that it should wait X seconds before performing the first probe | `30` |
| `falco.readinessProbe.timeoutSeconds` | Number of seconds after which the probe times out | `5` |
| `falco.readinessProbe.periodSeconds` | Specifies that the kubelet should perform the check every x seconds | `15` |
| `falco.programOutput.enabled` | Enable program output for security notifications | `false` |
| `falco.programOutput.keepAlive` | Start the program once or re-spawn when a notification arrives | `false` |
| `falco.programOutput.program` | Command to execute for program output | `mail -s "Falco Notification" someone@example.com` |
| `falco.httpOutput.enabled` | Enable http output for security notifications | `false` |
| `falco.httpOutput.url` | Url to notify using the http output when a notification arrives | |
| `falco.grpc.enabled` | Enable the Falco gRPC server | `false` |
| `falco.grpc.threadiness` | Number of threads (and context) the gRPC server will use, `0` by default, which means "auto" | `0` |
| `falco.grpc.unixSocketPath` | Unix socket the gRPC server will create | `unix:///var/run/falco/falco.sock` |
| `falco.grpc.listenPort` | Port where the Falco gRPC server listens for connections | `5060` |
| `falco.grpc.privateKey` | Key file path for the Falco gRPC server | `/etc/falco/certs/server.key` |
| `falco.grpc.certChain` | Cert file path for the Falco gRPC server | `/etc/falco/certs/server.crt` |
| `falco.grpc.rootCerts` | CA root file path for the Falco gRPC server | `/etc/falco/certs/ca.crt` |
| `falco.grpcOutput.enabled` | Enable the gRPC output; events will be kept in memory until you read them with a gRPC client. | `false` |
| `falco.metadataDownload.maxMb` | Max allowed response size (in Mb) when fetching metadata from Kubernetes | `100` |
| `falco.metadataDownload.chunkWaitUs` | Sleep time (in μs) for each download chunk when fetching metadata from Kubernetes | `1000` |
| `falco.metadataDownload.watchFreqSec` | Watch frequency (in seconds) when fetching metadata from Kubernetes | `1` |
| `customRules` | Third party rules enabled for Falco | `{}` |
| `certs.existingSecret` | Existing secret containing the key, cert, and CA, as well as the PEM bundle. | ` ` |
| `certs.server.key` | Key used by gRPC and webserver | ` ` |
| `certs.server.crt` | Certificate used by gRPC and webserver | ` ` |
| `certs.ca.crt` | CA certificate used by gRPC, webserver and AuditSink validation | ` ` |
| `nodeSelector` | The node selection constraint | `{}` |
| `affinity` | The affinity constraint | `{}` |
| `tolerations` | The tolerations for scheduling | `node-role.kubernetes.io/master:NoSchedule` |
| `scc.create` | Create OpenShift's Security Context Constraint | `true` |
| `extraInitContainers` | A list of initContainers you want to add to the falco pod in the daemonset. | `[]` |
| `extraVolumes` | A list of volumes you want to add to the falco daemonset. | `[]` |
| `extraVolumeMounts` | A list of volumeMounts you want to add to the falco container in the falco daemonset. | `[]` |
| `falcosidekick.enabled` | Enable `falcosidekick` deployment | `false` |
| `falcosidekick.fullfqdn` | Enable usage of full FQDN of `falcosidekick` service (useful when a Proxy is used) | `false` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
helm install falco --set falco.jsonOutput=true falcosecurity/falco
```
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
helm install falco -f values.yaml falcosecurity/falco
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Loading custom rules
Falco ships with a nice default ruleset. It is a good starting point, but sooner or later we will need to add custom rules that fit our needs.
So the question is: How can we load custom rules in our Falco deployment?
We are going to create a file that contains custom rules so that we can keep it in a Git repository.
```bash
cat custom-rules.yaml
```
And the file looks like this one:
```yaml
customRules:
rules-traefik.yaml: |-
- macro: traefik_consider_syscalls
condition: (evt.num < 0)
- macro: app_traefik
condition: container and container.image startswith "traefik"
# Restricting listening ports to selected set
- list: traefik_allowed_inbound_ports_tcp
items: [443, 80, 8080]
- rule: Unexpected inbound tcp connection traefik
desc: Detect inbound traffic to traefik using tcp on a port outside of expected set
condition: inbound and evt.rawres >= 0 and not fd.sport in (traefik_allowed_inbound_ports_tcp) and app_traefik
output: Inbound network connection to traefik on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting spawned processes to selected set
- list: traefik_allowed_processes
items: ["traefik"]
- rule: Unexpected spawned process traefik
desc: Detect a process started in a traefik container outside of an expected set
condition: spawned_process and not proc.name in (traefik_allowed_processes) and app_traefik
output: Unexpected process spawned in traefik container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image)
priority: NOTICE
```
So the next step is to use the custom-rules.yaml file for installing the Falco Helm chart.
```bash
helm install falco -f custom-rules.yaml falcosecurity/falco
```
And we will see in our logs something like:
```bash
Tue Jun 5 15:08:57 2018: Loading rules from file /etc/falco/rules.d/rules-traefik.yaml:
```
And this means that our Falco installation has loaded the rules and is ready to help us.
## Enabling K8s audit event support
### Using scripts
This has been tested with Kops and Minikube. You will need the following components:
* A Kubernetes cluster of version v1.13 or greater
* The apiserver must be configured with the Dynamic Auditing feature, using the following flags:
* `--audit-dynamic-configuration`
* `--feature-gates=DynamicAuditing=true`
* `--runtime-config=auditregistration.k8s.io/v1alpha1=true`
You can do it with the [scripts provided by Falco engineers](https://github.com/falcosecurity/evolution/tree/master/examples/k8s_audit_config)
by just running:
```
cd examples/k8s_audit_config
bash enable-k8s-audit.sh minikube dynamic
```
Or in the case of Kops:
```
cd examples/k8s_audit_config
APISERVER_HOST=api.my-kops-cluster.com bash ./enable-k8s-audit.sh kops dynamic
```
Then you can install the Falco chart with the audit log flags enabled:
`helm install falco --set auditLog.enabled=true --set auditLog.dynamicBackend.enabled=true falcosecurity/falco`
And that's it, you will start to see the K8s audit log related alerts.
### Known validation failed error
You may encounter an error like the following one:
```
helm install falco --set auditLog.enabled=true falcosecurity/falco
Error: validation failed: unable to recognize "": no matches for kind "AuditSink" in version "auditregistration.k8s.io/v1alpha1"
```
This means that the apiserver cannot recognize the `auditregistration.k8s.io`
resource, which means that the dynamic auditing feature hasn't been enabled
properly. You need to enable it or ensure that you are using a Kubernetes version
greater than v1.13.
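A quick way to check whether the API group is actually served by your apiserver:

```bash
# If this prints nothing, dynamic auditing is not enabled on the apiserver.
kubectl api-versions | grep auditregistration.k8s.io
```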
### Manual setup with NodePort on kOps
Using `kops edit cluster`, ensure these options are present, then run `kops update cluster` and `kops rolling-update cluster`:
```yaml
spec:
kubeAPIServer:
auditLogMaxBackups: 1
auditLogMaxSize: 10
auditLogPath: /var/log/k8s-audit.log
auditPolicyFile: /srv/kubernetes/assets/audit-policy.yaml
auditWebhookBatchMaxWait: 5s
auditWebhookConfigFile: /srv/kubernetes/assets/webhook-config.yaml
fileAssets:
- content: |
# content of the webserver CA certificate
# remove this fileAsset and certificate-authority from webhook-config if using http
name: audit-ca.pem
roles:
- Master
- content: |
apiVersion: v1
kind: Config
clusters:
- name: falco
cluster:
# remove 'certificate-authority' when using 'http'
certificate-authority: /srv/kubernetes/assets/audit-ca.pem
server: https://localhost:32765/k8s-audit
contexts:
- context:
cluster: falco
user: ""
name: default-context
current-context: default-context
preferences: {}
users: []
name: webhook-config.yaml
roles:
- Master
- content: |
# ... paste audit-policy.yaml here ...
# https://raw.githubusercontent.com/falcosecurity/evolution/master/examples/k8s_audit_config/audit-policy.yaml
name: audit-policy.yaml
roles:
- Master
```
Then you can install the Falco chart enabling these flags:
```shell
# without SSL (not recommended):
helm install falco --set auditLog.enabled=true --set falco.webserver.nodePort=32765 falcosecurity/falco
# with SSL:
helm install falco \
--set auditLog.enabled=true \
--set falco.webserver.sslEnabled=true \
--set falco.webserver.nodePort=32765 \
--set-file certs.server.key=/path/to/server.key \
--set-file certs.server.crt=/path/to/server.crt \
--set-file certs.ca.crt=/path/to/ca.crt \
falcosecurity/falco
```
The webserver reuses the gRPC certificate setup, which is [documented here](https://falco.org/docs/grpc/#generate-valid-ca). Generating the client certificate isn't required.
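Alternatively, per the `certs.existingSecret` parameter above, the certificates can come from a pre-created secret. A sketch, assuming the secret key names match what the chart templates expect (verify against the templates before using):

```bash
# Sketch only: the secret key names (server.key, server.crt, ca.crt) are
# assumptions; check the chart's templates for the exact keys it mounts.
kubectl create secret generic falco-certs \
  --from-file=server.key=/path/to/server.key \
  --from-file=server.crt=/path/to/server.crt \
  --from-file=ca.crt=/path/to/ca.crt
helm install falco \
  --set auditLog.enabled=true \
  --set falco.webserver.sslEnabled=true \
  --set falco.webserver.nodePort=32765 \
  --set certs.existingSecret=falco-certs \
  falcosecurity/falco
```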
## Using an init container
This chart allows adding init containers and extra volume mounts. One common usage of an init container is to specify a different image for loading the driver (e.g. [falcosecurity/driver-loader](https://hub.docker.com/repository/docker/falcosecurity/falco-driver-loader)). A slim image can then be used for running Falco (e.g. [falcosecurity/falco-no-driver](https://hub.docker.com/repository/docker/falcosecurity/falco-no-driver)).
### Using `falcosecurity/driver-loader` image
Create a YAML file `values.yaml` as follows:
```yaml
image:
repository: falcosecurity/falco-no-driver
extraInitContainers:
- name: driver-loader
image: docker.io/falcosecurity/falco-driver-loader:latest
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/proc
name: proc-fs
readOnly: true
- mountPath: /host/boot
name: boot-fs
readOnly: true
- mountPath: /host/lib/modules
name: lib-modules
- mountPath: /host/usr
name: usr-fs
readOnly: true
- mountPath: /host/etc
name: etc-fs
readOnly: true
```
Then:
```shell
helm install falco -f values.yaml falcosecurity/falco
```
### Using `falcosecurity/driver-loader` image with eBPF
Create a YAML file `values.yaml` as follows:
```yaml
image:
repository: falcosecurity/falco-no-driver
extraInitContainers:
- name: driver-loader
image: docker.io/falcosecurity/falco-driver-loader:latest
imagePullPolicy: Always
volumeMounts:
- mountPath: /host/proc
name: proc-fs
readOnly: true
- mountPath: /host/boot
name: boot-fs
readOnly: true
- mountPath: /host/lib/modules
name: lib-modules
- mountPath: /host/usr
name: usr-fs
readOnly: true
- mountPath: /host/etc
name: etc-fs
readOnly: true
- mountPath: /root/.falco
name: driver-fs
env:
- name: FALCO_BPF_PROBE
value:
extraVolumes:
- name: driver-fs
emptyDir: {}
extraVolumeMounts:
- mountPath: /root/.falco
name: driver-fs
ebpf:
enabled: true
```
Then:
```shell
helm install falco -f values.yaml falcosecurity/falco
```
## Enabling gRPC
The Falco gRPC server and the Falco gRPC Outputs APIs are not enabled by default.
Moreover, Falco supports running a gRPC server with two main binding types:
- Over a local **Unix socket** with no authentication
- Over the **network** with mandatory mutual TLS authentication (mTLS)
> **Tip**: Once gRPC is enabled, you can deploy [falco-exporter](https://github.com/falcosecurity/falco-exporter) to export metrics to Prometheus.
### gRPC over unix socket (default)
The preferred way to use gRPC is over a Unix socket.
To install Falco with gRPC enabled over a **Unix socket**, run:
```shell
helm install falco \
--set falco.grpc.enabled=true \
--set falco.grpcOutput.enabled=true \
falcosecurity/falco
```
### gRPC over network
The gRPC server over the network can only be used with mutual authentication between the clients and the server using TLS certificates.
How to generate the certificates is [documented here](https://falco.org/docs/grpc/#generate-valid-ca).
To install Falco with gRPC enabled over the **network**, run:
```shell
helm install falco \
--set falco.grpc.enabled=true \
--set falco.grpcOutput.enabled=true \
--set falco.grpc.unixSocketPath="" \
--set-file certs.server.key=/path/to/server.key \
--set-file certs.server.crt=/path/to/server.crt \
--set-file certs.ca.crt=/path/to/ca.crt \
falcosecurity/falco
```
## Deploy Falcosidekick with Falco
[`Falcosidekick`](https://github.com/falcosecurity/falcosidekick) can be installed with `Falco` by setting `--set falcosidekick.enabled=true`. This setting automatically configures all options of `Falco` for working with `Falcosidekick`.
All values for configuration of `Falcosidekick` are available by prefixing them with `falcosidekick.`. The full list of available values is [here](https://github.com/falcosecurity/charts/tree/master/falcosidekick#configuration).
For example, to enable the deployment of [`Falcosidekick-UI`](https://github.com/falcosecurity/falcosidekick-ui), add `--set falcosidekick.webui.enabled=true`.
If you use a proxy in your cluster, the requests between `Falco` and `Falcosidekick` might be intercepted; to avoid that, use the full FQDN of `Falcosidekick` by setting `--set falcosidekick.fullfqdn=true`.
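Putting the options from this section together, here is a sketch of a combined install using only the flags documented above:

```bash
# Sketch: Falco + Falcosidekick + Falcosidekick-UI, with the full FQDN of the
# falcosidekick service (useful when a proxy sits between the components).
helm install falco \
  --set falcosidekick.enabled=true \
  --set falcosidekick.webui.enabled=true \
  --set falcosidekick.fullfqdn=true \
  falcosecurity/falco
```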

View File

@ -0,0 +1,436 @@
# Change Log
This file documents all notable changes to the Falcosidekick Helm Chart. The release
numbering uses [semantic versioning](http://semver.org).
Before release 0.1.20, the helm chart could be found in the `falcosidekick` [repository](https://github.com/falcosecurity/falcosidekick/tree/master/deploy/helm/falcosidekick).
## 0.5.2
* Update Falcosidekick-UI image (fix wrong redirect to localhost when an ingress is used)
## 0.5.1
* Support `ingressClassName` field in falcosidekick ingresses.
## 0.5.0
### Major Changes
* Add `Policy Report` output
* Add `Syslog` output
* Add `AWS Kinesis` output
* Add `Zoho Cliq` output
* Support IRSA for AWS authentication
* Upgrade Falcosidekick-UI to v2.0.1
### Minor changes
* Allow to set custom Labels for pods
## 0.4.5
* Allow additional service-ui annotations
## 0.4.4
* Fix output after chart installation when ingress is enabled
## 0.4.3
* Support `annotation` block in service
## 0.4.2
* Fix: Added the rule to use the podsecuritypolicy
* Fix: Added `ServiceAccountName` to the UI deployment
## 0.4.1
* Removes duplicate `Fission` keys from secret
## 0.4.0
### Major Changes
* Support Ingress API version `networking.k8s.io/v1`, see `ingress.hosts` and `webui.ingress.hosts` in [values.yaml](values.yaml) for a breaking change in the `path` parameter
## 0.3.17
* Fix: Remove the value for bucket of `Yandex S3`, it enabled the output by default
## 0.3.16
### Major Changes
* Fix: set correct new image 2.24.0
## 0.3.15
### Major Changes
* Add `Fission` output
## 0.3.14
### Major Changes
* Add `Grafana` output
* Add `Yandex Cloud S3` output
* Add `Kafka REST` output
### Minor changes
* Docker image is now available on AWS ECR Public Gallery (`--set image.registry=public.ecr.aws`)
## 0.3.13
### Minor changes
* Enable extra volumes and volumemounts for `falcosidekick` via values
## 0.3.12
* Add AWS configuration field `config.aws.rolearn`
## 0.3.11
### Minor changes
* Make image registries for `falcosidekick` and `falcosidekick-ui` configurable
## 0.3.10
### Minor changes
* Fix table formatting in `README.md`
## 0.3.9
### Fixes
* Add missing `imagePullSecrets` in `falcosidekick/templates/deployment-ui.yaml`
## 0.3.8
### Major Changes
* Add `GCP Cloud Run` output
* Add `GCP Cloud Functions` output
* Add `Wavefront` output
* Allow MutualTLS for some outputs
* Add basic auth for Elasticsearch output
## 0.3.7
### Minor changes
* Fix table formatting in `README.md`
* Fix `config.azure.eventHub` parameter name in `README.md`
## 0.3.6
### Fixes
* Point to the correct name of aadpodidentity
## 0.3.5
### Minor Changes
* Fix link to Falco in the `README.md`
## 0.3.4
### Major Changes
* Bump up version (`v1.0.1`) of image for `falcosidekick-ui`
## 0.3.3
### Minor Changes
* Set default values for `OpenFaaS` output type parameters
* Fixes of documentation
## 0.3.2
### Fixes
* Add config checksum annotation to deployment pods to restart pods on config change
* Fix statsd config options in the secret to make them match the docs
## 0.3.1
### Fixes
* Fix for `s3.bucket`, it should be empty
## 0.3.0
### Major Changes
* Add `AWS S3` output
* Add `GCP Storage` output
* Add `RabbitMQ` output
* Add `OpenFaas` output
## 0.2.9
### Major Changes
* Updated falcosidekick-ui default image version to `v0.2.0`
## 0.2.8
### Fixes
* Fixed to specify `kafka.hostPort` instead of `kafka.url`
## 0.2.7
### Fixes
* Fixed missing hyphen in podidentity
## 0.2.6
### Fixes
* Fix repo and tag for `ui` image
## 0.2.5
### Major Changes
* Add `CLOUDEVENTS` output
* Add `WEBUI` output
### Minor Changes
* Add details about syntax for adding `custom_fields`
## 0.2.4
### Minor Changes
* Add `DATADOG_HOST` to secret
## 0.2.3
### Minor Changes
* Allow additional pod annotations
* Remove namespace condition in aad-pod-identity
## 0.2.2
### Major Changes
* Add `Kubeless` output
## 0.2.1
### Major Changes
* Add `PagerDuty` output
## 0.2.0
### Major Changes
* Add option to use an existing secret
* Add option to add extra environment variables
* Add `Stan` output
### Minor Changes
* Use the existing secret resource and add all possible variables there, making the deployment resource simpler to read and less error-prone
## 0.1.37
### Minor Changes
* Fix aws keys not being added to the deployment
## 0.1.36
### Minor Changes
* Fix helm test
## 0.1.35
### Major Changes
* Update image to use release 2.19.1
## 0.1.34
* New outputs can be set : `Kafka`, `AWS CloudWatchLogs`
## 0.1.33
### Minor Changes
* Fixed GCP Pub/Sub values references in `deployment.yaml`
## 0.1.32
### Major Changes
* Support release namespace configuration
## 0.1.31
### Major Changes
* New output can be set : `Googlechat`
## 0.1.30
### Major changes
* New output can be set : `GCP PubSub`
* Custom Headers can be set for `Webhook` output
* Fix typo `aipKey` for OpsGenie output
## 0.1.29
* Fix falcosidekick configuration table to use full path of configuration properties in the `README.md`
## 0.1.28
### Major changes
* New output can be set : `AWS SNS`
* Metrics in `prometheus` format can be scraped from `/metrics` URI
## 0.1.27
### Minor Changes
* Replace extensions apiGroup/apiVersion because of deprecation
## 0.1.26
### Minor Changes
* Allow the creation of a PodSecurityPolicy, disabled by default
## 0.1.25
### Minor Changes
* Allow the configuration of the Pod securityContext, set default runAsUser and fsGroup values
## 0.1.24
### Minor Changes
* Remove duplicated `webhook` block in `values.yaml`
## 0.1.23
* fake release for triggering CI for auto-publishing
## 0.1.22
### Major Changes
* Add `imagePullSecrets`
## 0.1.21
### Minor Changes
* Fix `Azure Identity` case sensitive value
## 0.1.20
### Major Changes
* New outputs can be set : `Azure Event Hubs`, `Discord`
### Minor Changes
* Fix wrong port name in output
## 0.1.17
### Major Changes
* New outputs can be set : `Mattermost`, `Rocketchat`
## 0.1.11
### Major Changes
* Add Pod Security Policy
## 0.1.11
### Minor Changes
* Fix wrong value reference for Elasticsearch output in deployment.yaml
## 0.1.10
### Major Changes
* New output can be set : `DogStatsD`
## 0.1.9
### Major Changes
* New output can be set : `StatsD`
## 0.1.7
### Major Changes
* New output can be set : `Opsgenie`
## 0.1.6
### Major Changes
* New output can be set : `NATS`
## 0.1.5
### Major Changes
* `Falcosidekick` and its chart are now part of `falcosecurity` organization
## 0.1.4
### Minor Changes
* Use more recent image with `Golang` 1.14
## 0.1.3
### Major Changes
* New output can be set : `Loki`
## 0.1.2
### Major Changes
* New output can be set : `SMTP`
## 0.1.1
### Major Changes
* New outputs can be set : `AWS Lambda`, `AWS SQS`, `Teams`
## 0.1.0
### Major Changes
* Initial release of Falcosidekick Helm Chart

View File

@ -0,0 +1,16 @@
apiVersion: v1
appVersion: 2.25.0
description: Connect Falco to your ecosystem
home: https://github.com/falcosecurity/falcosidekick
icon: https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png
keywords:
- monitoring
- security
- alerting
maintainers:
- email: cncf-falco-dev@lists.cncf.io
name: Issif
name: falcosidekick
sources:
- https://github.com/falcosecurity/falcosidekick
version: 0.5.2

View File

@ -0,0 +1,398 @@
# Falcosidekick
![falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falcosidekick_color.png)
![release](https://flat.badgen.net/github/release/falcosecurity/falcosidekick/latest?color=green) ![last commit](https://flat.badgen.net/github/last-commit/falcosecurity/falcosidekick) ![licence](https://flat.badgen.net/badge/license/MIT/blue) ![docker pulls](https://flat.badgen.net/docker/pulls/falcosecurity/falcosidekick?icon=docker)
## Description
A simple daemon for connecting [`Falco`](https://github.com/falcosecurity/falco) to your ecosystem. It takes `Falco`'s events and
forwards them to different outputs in a fan-out way.
It works as a single endpoint for as many `Falco` instances as you want:
![falco_with_falcosidekick](https://github.com/falcosecurity/falcosidekick/raw/master/imgs/falco_with_falcosidekick.png)
## Outputs
`Falcosidekick` manages a large variety of outputs with different purposes.
### Chat
- [**Slack**](https://slack.com)
- [**Rocketchat**](https://rocket.chat/)
- [**Mattermost**](https://mattermost.com/)
- [**Teams**](https://products.office.com/en-us/microsoft-teams/group-chat-software)
- [**Discord**](https://www.discord.com/)
- [**Google Chat**](https://workspace.google.com/products/chat/)
- [**Zoho Cliq**](https://www.zoho.com/cliq/)
### Metrics / Observability
- [**Datadog**](https://www.datadoghq.com/)
- [**Influxdb**](https://www.influxdata.com/products/influxdb-overview/)
- [**StatsD**](https://github.com/statsd/statsd) (for monitoring of `falcosidekick`)
- [**DogStatsD**](https://docs.datadoghq.com/developers/dogstatsd/?tab=go) (for monitoring of `falcosidekick`)
- [**Prometheus**](https://prometheus.io/) (for both events and monitoring of `falcosidekick`)
- [**Wavefront**](https://www.wavefront.com)
### Alerting
- [**AlertManager**](https://prometheus.io/docs/alerting/alertmanager/)
- [**Opsgenie**](https://www.opsgenie.com/)
- [**PagerDuty**](https://pagerduty.com/)
### Logs
- [**Elasticsearch**](https://www.elastic.co/)
- [**Loki**](https://grafana.com/oss/loki)
- [**AWS CloudWatchLogs**](https://aws.amazon.com/cloudwatch/features/)
- [**Grafana**](https://grafana.com/) (annotations)
- **Syslog**
### Object Storage
- [**AWS S3**](https://aws.amazon.com/s3/features/)
- [**GCP Storage**](https://cloud.google.com/storage)
- [**Yandex S3 Storage**](https://cloud.yandex.com/en-ru/services/storage)
### FaaS / Serverless
- [**AWS Lambda**](https://aws.amazon.com/lambda/features/)
- [**Kubeless**](https://kubeless.io/)
- [**OpenFaaS**](https://www.openfaas.com)
- [**GCP Cloud Run**](https://cloud.google.com/run)
- [**GCP Cloud Functions**](https://cloud.google.com/functions)
- [**Fission**](https://fission.io)
### Message queue / Streaming
- [**NATS**](https://nats.io/)
- [**STAN (NATS Streaming)**](https://docs.nats.io/nats-streaming-concepts/intro)
- [**AWS SQS**](https://aws.amazon.com/sqs/features/)
- [**AWS SNS**](https://aws.amazon.com/sns/features/)
- [**AWS Kinesis**](https://aws.amazon.com/kinesis/)
- [**GCP PubSub**](https://cloud.google.com/pubsub)
- [**Apache Kafka**](https://kafka.apache.org/)
- [**Kafka Rest Proxy**](https://docs.confluent.io/platform/current/kafka-rest/index.html)
- [**RabbitMQ**](https://www.rabbitmq.com/)
- [**Azure Event Hubs**](https://azure.microsoft.com/en-in/services/event-hubs/)
### Email
- **SMTP**
### Web
- **Webhook**
- [**WebUI**](https://github.com/falcosecurity/falcosidekick-ui) (a Web UI for displaying latest events in real time)
### Other
- [**Policy Report**](https://github.com/kubernetes-sigs/wg-policy-prototypes/tree/master/policy-report/falco-adapter)
## Adding `falcosecurity` repository
Before installing the chart, add the `falcosecurity` charts repository:
```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
```
## Installing the Chart
### Install Falco + Falcosidekick + Falcosidekick-ui
To install the chart with the release name `falcosidekick` run:
```bash
helm install falcosidekick falcosecurity/falcosidekick --set webui.enabled=true
```
### With Helm chart of Falco
`Falco`, `Falcosidekick` and `Falcosidekick-ui` can be installed together in one command. All values to configure `Falcosidekick` will have to be
prefixed with `falcosidekick.`.
```bash
helm install falco falcosecurity/falco --set falcosidekick.enabled=true --set falcosidekick.webui.enabled=true
```
After a few seconds, Falcosidekick should be running.
> **Tip**: List all releases using `helm list`; a release is a name used to track a specific deployment
## Minimum Kubernetes version
The minimum Kubernetes version required is 1.17.x
## Uninstalling the Chart
To uninstall the `falcosidekick` deployment:
```bash
helm uninstall falcosidekick
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the main configurable parameters of the Falcosidekick chart and their default values. See `values.yaml` for the full list.
| Parameter | Description | Default |
| ------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- |
| `replicaCount` | number of running pods | `1` |
| `podAnnotations` | additional annotations on the pods | `{}` |
| `podLabels` | additional labels on the pods | `{}` |
| `listenport` | port on which the daemon listens | `2801` |
| `resources` | the resources for falcosidekick pods | `{}` |
| `config.debug` | if *true*, all outputs will print the payload they send to stdout | `false` |
| `config.customfields` | a list of escaped comma separated custom fields to add to falco events, syntax is "key:value\,key:value" | |
| `config.mutualtlsfilespath` | folder which will be used to store client.crt, client.key and ca.crt files for mutual tls | `/etc/certs` |
| `config.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.slack.webhookurl` | Slack Webhook URL (ex: https://hooks.slack.com/services/XXXX/YYYY/ZZZZ), if not `empty`, Slack output is *enabled* | |
| `config.slack.footer` | Slack Footer | https://github.com/falcosecurity/falcosidekick |
| `config.slack.icon` | Slack icon (avatar) | https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png |
| `config.slack.username` | Slack username | `falcosidekick` |
| `config.slack.outputformat` | `all` (default), `text` (only text is displayed in Slack), `fields` (only fields are displayed in Slack) | `all` |
| `config.slack.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.slack.messageformat` | a Go template to format Slack Text above Attachment, displayed in addition to the output from `slack.outputformat`. If empty, no Text is displayed before Attachment | |
| `config.rocketchat.webhookurl` | Rocketchat Webhook URL (ex: https://XXXX/hooks/YYYY), if not `empty`, Rocketchat output is *enabled* | |
| `config.rocketchat.icon` | Rocketchat icon (avatar) | https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png |
| `config.rocketchat.username` | Rocketchat username | `falcosidekick` |
| `config.rocketchat.outputformat` | `all` (default), `text` (only text is displayed in Rocketchat), `fields` (only fields are displayed in Rocketchat) | `all` |
| `config.rocketchat.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.rocketchat.messageformat` | a Go template to format Rocketchat Text above Attachment, displayed in addition to the output from `slack.outputformat`. If empty, no Text is displayed before Attachment | |
| `config.rocketchat.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.rocketchat.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.mattermost.webhookurl` | Mattermost Webhook URL (ex: https://XXXX/hooks/YYYY), if not `empty`, Mattermost output is *enabled* | |
| `config.mattermost.footer` | Mattermost Footer | https://github.com/falcosecurity/falcosidekick |
| `config.mattermost.icon` | Mattermost icon (avatar) | https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png |
| `config.mattermost.username` | Mattermost username | `falcosidekick` |
| `config.mattermost.outputformat` | `all` (default), `text` (only text is displayed in Mattermost), `fields` (only fields are displayed in Mattermost) | `all` |
| `config.mattermost.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.mattermost.messageformat` | a Go template to format Mattermost Text above Attachment, displayed in addition to the output from `slack.outputformat`. If empty, no Text is displayed before Attachment | |
| `config.mattermost.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.mattermost.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.teams.webhookurl` | Teams Webhook URL (ex: https://outlook.office.com/webhook/XXXXXX/IncomingWebhook/YYYYYY), if not `empty`, Teams output is *enabled* | |
| `config.teams.activityimage` | Teams section image | https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png |
| `config.teams.outputformat` | `all` (default), `text` (only text is displayed in Teams), `facts` (only facts are displayed in Teams) | `all` |
| `config.teams.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.datadog.apikey` | Datadog API Key, if not `empty`, Datadog output is *enabled* | |
| `config.datadog.host` | Datadog host. Override if you are on the Datadog EU site. Defaults to the American site with "https://api.datadoghq.com" | https://api.datadoghq.com |
| `config.datadog.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.discord.webhookurl` | Discord WebhookURL (ex: https://discord.com/api/webhooks/xxxxxxxxxx...), if not empty, Discord output is *enabled* | |
| `config.discord.icon` | Discord icon (avatar) | https://raw.githubusercontent.com/falcosecurity/falcosidekick/master/imgs/falcosidekick_color.png |
| `config.discord.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.alertmanager.hostport` | AlertManager http://host:port, if not `empty`, AlertManager is *enabled* | |
| `config.alertmanager.endpoint` | alertmanager endpoint on which falcosidekick posts alerts, choice is `"/api/v1/alerts"` or `"/api/v2/alerts"`, default is `"/api/v1/alerts"` | |
| `config.alertmanager.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.alertmanager.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.alertmanager.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.elasticsearch.hostport` | Elasticsearch http://host:port, if not `empty`, Elasticsearch is *enabled* | |
| `config.elasticsearch.index` | Elasticsearch index | `falco` |
| `config.elasticsearch.type` | Elasticsearch document type | `event` |
| `config.elasticsearch.suffix` | date suffix for index rotation : `daily`, `monthly`, `annually`, `none` | `daily` |
| `config.elasticsearch.username` | use this username to authenticate to Elasticsearch if the username is not empty | |
| `config.elasticsearch.password` | use this password to authenticate to Elasticsearch if the password is not empty | |
| `config.elasticsearch.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.elasticsearch.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.elasticsearch.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.influxdb.hostport` | Influxdb http://host:port, if not `empty`, Influxdb is *enabled* | |
| `config.influxdb.database` | Influxdb database | `falco` |
| `config.influxdb.user` | User to use if auth is *enabled* in Influxdb | |
| `config.influxdb.password` | Password to use if auth is *enabled* in Influxdb | |
| `config.influxdb.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.influxdb.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.influxdb.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.loki.hostport` | Loki http://host:port, if not `empty`, Loki is *enabled* | |
| `config.loki.endpoint` | Loki endpoint URL path, default is "/api/prom/push" more info: https://grafana.com/docs/loki/latest/api/#post-apiprompush | |
| `config.loki.tenant` | Loki tenant, if not `empty`, Loki tenant is *enabled* | |
| `config.loki.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.loki.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.loki.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.nats.hostport` | NATS "nats://host:port", if not `empty`, NATS is *enabled* | |
| `config.nats.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.nats.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.nats.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.stan.hostport` | Stan nats://{domain or ip}:{port}, if not empty, STAN output is *enabled* | |
| `config.stan.clusterid` | Cluster name, if not empty, STAN output is *enabled* | |
| `config.stan.clientid` | Client ID, if not empty, STAN output is *enabled* | |
| `config.stan.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.stan.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.stan.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.aws.accesskeyid` | AWS Access Key Id (optional if you use EC2 Instance Profile) | |
| `config.aws.rolearn` | AWS IAM role ARN for falcosidekick service account to associate with (optional if you use EC2 Instance Profile) | |
| `config.aws.secretaccesskey` | AWS Secret Access Key (optional if you use EC2 Instance Profile) | |
| `config.aws.region` | AWS Region (optional if you use EC2 Instance Profile) | |
| `config.aws.cloudwatchlogs.loggroup` | AWS CloudWatch Logs Group name, if not empty, CloudWatch Logs output is *enabled* | |
| `config.aws.cloudwatchlogs.logstream` | AWS CloudWatch Logs Stream name, if empty, Falcosidekick will try to create a log stream | |
| `config.aws.cloudwatchlogs.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.aws.lambda.functionname` | AWS Lambda Function Name, if not empty, AWS Lambda output is *enabled* | |
| `config.aws.lambda.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.aws.sns.topicarn` | AWS SNS TopicARN, if not empty, AWS SNS output is *enabled* | |
| `config.aws.sns.rawjson` | Send RawJSON from `falco` or parse it to AWS SNS | |
| `config.aws.sns.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.aws.sqs.url` | AWS SQS Queue URL, if not empty, AWS SQS output is *enabled* | |
| `config.aws.sqs.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.aws.s3.bucket` | AWS S3, bucket name | |
| `config.aws.s3.prefix` | AWS S3, name of prefix, keys will have format: s3://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json | |
| `config.aws.s3.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.aws.kinesis.streamname` | AWS Kinesis Stream Name, if not empty, Kinesis output is *enabled* | |
| `config.aws.kinesis.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.smtp.hostport` | "host:port" address of SMTP server, if not empty, SMTP output is *enabled* | |
| `config.smtp.user` | user to access SMTP server | |
| `config.smtp.password` | password to access SMTP server | |
| `config.smtp.from` | Sender address (mandatory if SMTP output is *enabled*) | |
| `config.smtp.to` | comma-separated list of recipient addresses, can't be empty (mandatory if SMTP output is *enabled*) | |
| `config.smtp.outputformat` | html, text | `html` |
| `config.smtp.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.opsgenie.apikey` | Opsgenie API Key, if not empty, Opsgenie output is *enabled* | |
| `config.opsgenie.region` | (`us` or `eu`) region of your domain | `us` |
| `config.opsgenie.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.opsgenie.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.opsgenie.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.statsd.forwarder` | The address for the StatsD forwarder, in the form http://host:port, if not empty StatsD is *enabled* | |
| `config.statsd.namespace` | A prefix for all metrics | `falcosidekick` |
| `config.dogstatsd.forwarder` | The address for the DogStatsD forwarder, in the form http://host:port, if not empty DogStatsD is *enabled* | |
| `config.dogstatsd.namespace` | A prefix for all metrics | `falcosidekick` |
| `config.dogstatsd.tags` | A comma-separated list of tags to add to all metrics | |
| `config.webhook.address` | Webhook address, if not empty, Webhook output is *enabled* | |
| `config.webhook.customHeaders` | a list of comma separated custom headers to add, syntax is "key:value\,key:value" | |
| `config.webhook.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.webhook.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.webhook.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.azure.eventHub.name` | Name of the Hub, if not empty, EventHub is *enabled* | |
| `config.azure.eventHub.namespace` | Name of the space the Hub is in | |
| `config.azure.eventHub.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.gcp.credentials` | Base64 encoded JSON key file for the GCP service account | |
| `config.gcp.pubsub.projectid` | ID of the GCP project | |
| `config.gcp.pubsub.topic` | Name of the Pub/Sub topic | |
| `config.gcp.pubsub.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.gcp.storage.prefix` | Name of prefix, keys will have format: gs://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json | |
| `config.gcp.storage.bucket` | The name of the bucket | |
| `config.gcp.storage.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.gcp.cloudfunctions.name` | The name of the Cloud Function which is in form `projects/<project_id>/locations/<region>/functions/<function_name>` | |
| `config.gcp.cloudfunctions.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | |
| `config.gcp.cloudrun.endpoint` | the URL of the Cloud Run function | |
| `config.gcp.cloudrun.jwt` | JWT for the private access to Cloud Run function | |
| `config.gcp.cloudrun.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | |
| `config.googlechat.webhookurl` | Google Chat Webhook URL (ex: https://chat.googleapis.com/v1/spaces/XXXXXX/YYYYYY), if not `empty`, Google Chat output is *enabled* | |
| `config.googlechat.outputformat` | `all` (default), `text` (only text is displayed in Google chat) | `all` |
| `config.googlechat.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.googlechat.messageformat` | a Go template to format Google Chat Text above Attachment, displayed in addition to the output from `config.googlechat.outputformat`. If empty, no Text is displayed before Attachment | |
| `config.kafka.hostport` | The Host:Port of the Kafka broker (ex: kafka:9092). If not empty, Kafka output is *enabled* | |
| `config.kafka.topic` | The Kafka topic to publish events to | |
| `config.kafka.partition` | The Kafka partition to publish events to | |
| `config.kafka.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.pagerduty.routingkey` | Pagerduty Routing Key, if not empty, Pagerduty output is *enabled* | |
| `config.pagerduty.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.kubeless.function` | Name of Kubeless function, if not empty, Kubeless output is *enabled* | |
| `config.kubeless.namespace` | Namespace of Kubeless function (mandatory) | |
| `config.kubeless.port` | Port of service of Kubeless function. Default is `8080` | |
| `config.kubeless.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.kubeless.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.kubeless.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.openfaas.functionname` | Name of OpenFaaS function, if not empty, OpenFaaS is *enabled* | |
| `config.openfaas.functionnamespace` | Namespace of OpenFaaS function, "openfaas-fn" (default) | `openfaas-fn` |
| `config.openfaas.gatewayservice` | Service of OpenFaaS Gateway, "gateway" (default) | `gateway` |
| `config.openfaas.gatewayport` | Port of service of OpenFaaS Gateway. Default is `8080` | `8080` |
| `config.openfaas.gatewaynamespace` | Namespace of OpenFaaS Gateway, "openfaas" (default) | `openfaas` |
| `config.openfaas.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | |
| `config.openfaas.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.openfaas.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.cloudevents.address` | CloudEvents consumer http address, if not empty, CloudEvents output is *enabled* | |
| `config.cloudevents.extension` | Extensions to add in the outbound Event, useful for routing | |
| `config.cloudevents.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.rabbitmq.url` | Rabbitmq URL, if not empty, Rabbitmq output is *enabled* | |
| `config.rabbitmq.queue` | Rabbitmq Queue name | |
| `config.rabbitmq.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.wavefront.endpointtype` | Wavefront endpoint type, must be 'direct' or 'proxy'. If not empty, with endpointhost, Wavefront output is *enabled* | |
| `config.wavefront.endpointhost` | Wavefront endpoint address (only the host). If not empty, with endpointtype, Wavefront output is *enabled* | |
| `config.wavefront.endpointtoken` | Wavefront token. Must be used only when endpointtype is 'direct' | |
| `config.wavefront.endpointmetricport` | Port to send metrics. Only used when endpointtype is 'proxy' | `2878` |
| `config.wavefront.metricname` | Metric to be created in Wavefront. Defaults to falco.alert | `falco.alert` |
| `config.wavefront.batchsize` | Wavefront batch size. If empty uses the default 10000. Only used when endpointtype is 'direct' | `10000` |
| `config.wavefront.flushintervalseconds` | Wavefront flush interval in seconds. Defaults to 1 | `1` |
| `config.wavefront.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.grafana.hostport` | http://{domain or ip}:{port}, if not empty, Grafana output is *enabled* | |
| `config.grafana.apikey` | API Key to authenticate to Grafana, if not empty, Grafana output is *enabled* | |
| `config.grafana.dashboardid` | annotations are scoped to a specific dashboard. Optional. | |
| `config.grafana.panelid` | annotations are scoped to a specific panel. Optional. | |
| `config.grafana.allfieldsastags` | if true, all custom fields are added as tags | `false` |
| `config.grafana.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | `false` |
| `config.grafana.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.grafana.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.fission.function` | Name of Fission function, if not empty, Fission is *enabled* | |
| `config.fission.routernamespace` | Namespace of Fission Router | `fission` |
| `config.fission.routerservice` | Service of Fission Router | `router` |
| `config.fission.routerport` | Port of service of Fission Router | `80` |
| `config.fission.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.fission.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.fission.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | `false` |
| `config.yandex.accesskeyid` | Yandex access key | |
| `config.yandex.secretaccesskey` | Yandex secret access key | |
| `config.yandex.region` | Yandex storage region | `ru-central1` |
| `config.yandex.s3.endpoint` | Yandex storage endpoint (default: https://storage.yandexcloud.net) | |
| `config.yandex.s3.bucket` | Yandex storage, bucket name | `falcosidekick` |
| `config.yandex.s3.prefix` | name of prefix, keys will have format: s3://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json | |
| `config.yandex.s3.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.kafkarest.address` | The full URL to the topic (example "http://kafkarest:8082/topics/test") | |
| `config.kafkarest.version` | Kafka REST Proxy API version | `2` |
| `config.kafkarest.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.kafkarest.checkcert` | check if ssl certificate of the output is valid | `true` |
| `config.kafkarest.mutualtls` | if true, checkcert flag will be ignored (server cert will always be checked) | `false` |
| `config.syslog.host` | Syslog Host, if not empty, Syslog output is *enabled* | |
| `config.syslog.port` | Syslog endpoint port number | |
| `config.syslog.protocol` | Syslog transport protocol. It can be either "tcp" or "udp" | `tcp` |
| `config.syslog.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.cliq.webhookurl` | Zoho Cliq Channel URL (ex: https://cliq.zoho.eu/api/v2/channelsbyname/XXXX/message?zapikey=YYYY), if not empty, Cliq Chat output is *enabled* | |
| `config.cliq.icon` | Cliq icon (avatar) | |
| `config.cliq.useemoji` | Prefix message text with an emoji | `true` |
| `config.cliq.outputformat` | `all` (default), `text` (only text is displayed in Cliq), `fields` (only fields are displayed in Cliq) | `all` |
| `config.cliq.messageformat` | a Go template to format Cliq text above the attachment, displayed in addition to the output from `cliq.outputformat`. If empty, no text is displayed before sections. | |
| `config.cliq.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `config.policyreport.enabled` | if true, policyreport output is *enabled* | `false` |
| `config.policyreport.kubeconfig` | Kubeconfig file to use (only if falcosidekick is running outside the cluster) | `~/.kube/config` |
| `config.policyreport.maxevents` | the max number of events that can be in a policyreport | `1000` |
| `config.policyreport.prunebypriority` | if true, the events with lowest severity are pruned first, in FIFO order | `false` |
| `config.policyreport.minimumpriority` | minimum priority of event to use this output, order is `emergency\|alert\|critical\|error\|warning\|notice\|informational\|debug or ""` | `debug` |
| `image.registry` | The image registry to pull from | `docker.io` |
| `image.repository` | The image repository to pull from | `falcosecurity/falcosidekick` |
| `image.tag` | The image tag to pull | `2.23.1` |
| `image.pullPolicy` | The image pull policy | `IfNotPresent` |
| `extraVolumes` | Extra volumes for sidekick deployment | |
| `extraVolumeMounts` | Extra volume mounts for sidekick deployment | |
| `webui.enabled` | enable Falcosidekick-UI | `false` |
| `webui.podAnnotations` | additional annotations for the web UI pods | `{}` |
| `webui.podLabels` | additional labels for the web UI pods | `{}` |
| `webui.image.registry` | The web UI image registry to pull from | `docker.io` |
| `webui.image.repository` | The web UI image repository to pull from | `falcosecurity/falcosidekick-ui` |
| `webui.image.tag` | The web UI image tag to pull | `v1.1.0` |
| `webui.image.pullPolicy` | The web UI image pull policy | `IfNotPresent` |
| `webui.resources` | The resources for the web UI pods | `{}` |
| `webui.service.type` | The web UI service type (e.g. LoadBalancer) | `ClusterIP` |
| `webui.service.port` | The web UI service port for the falcosidekick-ui | `2802` |
| `webui.service.nodePort` | The web UI service nodePort | `30282` |
| `webui.service.targetPort` | The web UI service targetPort | `2802` |
| `webui.service.annotations` | The web UI service annotations (use this to set an internal LB, for example) | `{}` |
| `webui.redis.image.registry` | The Redis image registry to pull from | `docker.io` |
| `webui.redis.image.repository` | The Redis image repository to pull from | `falcosecurity/falcosidekick-ui` |
| `webui.redis.image.tag` | The Redis image tag to pull | `v1.1.0` |
| `webui.redis.image.pullPolicy` | The Redis image pull policy | `IfNotPresent` |
| `webui.redis.podAnnotations` | additional annotations for the Redis pods | `{}` |
| `webui.redis.podLabels` | additional labels for the Redis pods | `{}` |
| `webui.redis.resources` | The resources for the Redis pod | `{}` |
| `webui.redis.storageSize` | Size of the PVC for the Redis pod | |
| `webui.redis.storageClass` | Storage class of the PVC for the Redis pod | |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:
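A minimal sketch (the release name `falcosidekick`, the chart reference `falcosecurity/falcosidekick`, and the webhook URL are placeholders for illustration, not values shipped with this chart):

```bash
# Enable the Slack output and only forward events with priority "error" or above
helm install falcosidekick falcosecurity/falcosidekick \
  --set config.slack.webhookurl="https://hooks.slack.com/services/XXXX/YYYY/ZZZZ" \
  --set config.slack.minimumpriority="error"
```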
> **Tip**: You can use the default [values.yaml](values.yaml) as a starting point for your own overrides file.
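Alternatively, parameters can be provided in a YAML file passed with `-f`. A hypothetical `custom-values.yaml` (the AlertManager host below is an assumption for illustration):

```yaml
# custom-values.yaml -- override only the keys you need
config:
  alertmanager:
    hostport: "http://alertmanager:9093"  # a non-empty hostport enables the AlertManager output
    minimumpriority: "warning"
webui:
  enabled: true                           # also deploy Falcosidekick-UI
```

Then install with `helm install falcosidekick falcosecurity/falcosidekick -f custom-values.yaml`.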
## Metrics
A `prometheus` endpoint can be scraped at `/metrics`.
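As an illustration, a minimal Prometheus scrape job could look like the sketch below (the target assumes a release named `falcosidekick` exposing the default service port `2801` in the same namespace as Prometheus; adjust both to your setup):

```yaml
# prometheus.yml (fragment)
scrape_configs:
  - job_name: "falcosidekick"
    metrics_path: /metrics
    static_configs:
      - targets: ["falcosidekick:2801"]  # <service>:<port> of the falcosidekick Service
```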

View File

@ -0,0 +1,44 @@
1. Get the URL for Falcosidekick by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "falcosidekick.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "falcosidekick.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "falcosidekick.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward svc/{{ include "falcosidekick.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }} --namespace {{ .Release.Namespace }}
echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application"
{{- end }}
{{- if .Values.webui.enabled }}
2. Get the URL for Falcosidekick-UI (WebUI) by running these commands:
{{- if .Values.webui.ingress.enabled }}
{{- range $host := .Values.webui.ingress.hosts }}
http{{ if $.Values.webui.ingress.tls }}s{{ end }}://{{ $host.host }}{{ index .paths 0 }}
{{- end }}
{{- else if contains "NodePort" .Values.webui.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "falcosidekick.fullname" . }}-ui)
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT/ui
{{- else if contains "LoadBalancer" .Values.webui.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "falcosidekick.fullname" . }}-ui'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "falcosidekick.fullname" . }}-ui -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.webui.service.port }}
{{- else if contains "ClusterIP" .Values.webui.service.type }}
kubectl port-forward svc/{{ include "falcosidekick.fullname" . }}-ui {{ .Values.webui.service.port }}:{{ .Values.webui.service.port }} --namespace {{ .Release.Namespace }}
echo "Visit http://127.0.0.1:{{ .Values.webui.service.port }}/ui to use your application"
{{- end }}
{{ else }}
2. Enable Falcosidekick-UI (WebUI) by adding this argument to your command:
--set webui.enabled=true
{{- end }}

View File

@ -0,0 +1,59 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "falcosidekick.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "falcosidekick.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falcosidekick.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "falcosidekick.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}}
{{- print "networking.k8s.io/v1" -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Return if ingress is stable.
*/}}
{{- define "falcosidekick.ingress.isStable" -}}
{{- eq (include "falcosidekick.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "falcosidekick.ingress.supportsPathType" -}}
{{- or (eq (include "falcosidekick.ingress.isStable" .) "true") (and (eq (include "falcosidekick.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}

View File

@ -0,0 +1,20 @@
{{- if and .Values.config.azure.podIdentityClientID .Values.config.azure.podIdentityName -}}
---
apiVersion: "aadpodidentity.k8s.io/v1"
kind: AzureIdentity
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
type: 0
resourceID: /subscriptions/{{ .Values.config.azure.subscriptionID }}/resourcegroups/{{ .Values.config.azure.resourceGroupName }}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{{ .Values.config.azure.podIdentityName }}
clientID: {{ .Values.config.azure.podIdentityClientID }}
---
apiVersion: "aadpodidentity.k8s.io/v1"
kind: AzureIdentityBinding
metadata:
name: {{ include "falcosidekick.fullname" . }}
spec:
azureIdentity: {{ include "falcosidekick.fullname" . }}
selector: {{ include "falcosidekick.fullname" . }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if .Values.podSecurityPolicy.create }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "falcosidekick.fullname" .}}
labels:
app: {{ template "falcosidekick.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "falcosidekick.fullname" . }}
verbs:
- use
{{- end }}

View File

@ -0,0 +1,176 @@
{{- if .Values.webui.enabled }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}-ui
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: {{ .Values.webui.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
app.kubernetes.io/instance: {{ .Release.Name }}-ui
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
app.kubernetes.io/instance: {{ .Release.Name }}-ui
{{- if .Values.webui.podLabels }}
{{ toYaml .Values.webui.podLabels | indent 8 }}
{{- end }}
{{- if .Values.webui.podAnnotations }}
annotations:
{{ toYaml .Values.webui.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "falcosidekick.fullname" . }}-ui
{{- if .Values.webui.priorityClassName }}
priorityClassName: "{{ .Values.webui.priorityClassName }}"
{{- end }}
securityContext:
runAsUser: {{ .Values.webui.podSecurityContext.runAsUser }}
fsGroup: {{ .Values.webui.podSecurityContext.fsGroup }}
containers:
- name: {{ .Chart.Name }}-ui
image: "{{ .Values.webui.image.registry }}/{{ .Values.webui.image.repository }}:{{ .Values.webui.image.tag }}"
imagePullPolicy: {{ .Values.webui.image.pullPolicy }}
args:
- "-r"
- {{ include "falcosidekick.fullname" . }}-ui-redis{{ if .Values.webui.redis.fullfqdn }}.{{ .Release.Namespace }}.svc.cluster.local{{ end }}:{{ .Values.webui.redis.service.port | default "6379" }}
ports:
- name: http
containerPort: 2802
protocol: TCP
livenessProbe:
httpGet:
path: /api/v1/healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /api/v1/healthz
port: http
initialDelaySeconds: 10
periodSeconds: 5
resources:
{{- toYaml .Values.webui.resources | nindent 12 }}
{{- with .Values.webui.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui-redis
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}-ui
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 1
serviceName: {{ include "falcosidekick.fullname" . }}-ui-redis
selector:
matchLabels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui-redis
app.kubernetes.io/instance: {{ .Release.Name }}-ui-redis
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui-redis
app.kubernetes.io/instance: {{ .Release.Name }}-ui-redis
{{- if .Values.webui.redis.podLabels }}
{{ toYaml .Values.webui.redis.podLabels | indent 8 }}
{{- end }}
{{- if .Values.webui.redis.podAnnotations }}
annotations:
{{ toYaml .Values.webui.redis.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "falcosidekick.fullname" . }}-ui
{{- if .Values.webui.redis.priorityClassName }}
priorityClassName: "{{ .Values.webui.redis.priorityClassName }}"
{{- end }}
containers:
- name: redis
image: "{{ .Values.webui.redis.image.registry }}/{{ .Values.webui.redis.image.repository }}:{{ .Values.webui.redis.image.tag }}"
imagePullPolicy: {{ .Values.webui.redis.image.pullPolicy }}
args: []
ports:
- name: redis
containerPort: 6379
protocol: TCP
livenessProbe:
tcpSocket:
port: 6379
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 3
readinessProbe:
tcpSocket:
port: 6379
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 3
volumeMounts:
- name: {{ include "falcosidekick.fullname" . }}-ui-redis-data
mountPath: /data
resources:
{{- toYaml .Values.webui.redis.resources | nindent 12 }}
{{- with .Values.webui.redis.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.redis.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webui.redis.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumeClaimTemplates:
- metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis-data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: {{ .Values.webui.redis.storageSize }}
{{- if .Values.webui.redis.storageClass }}
storageClassName: {{ .Values.webui.redis.storageClass }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,107 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if and .Values.config.azure.podIdentityClientID .Values.config.azure.podIdentityName }}
aadpodidbinding: {{ include "falcosidekick.fullname" . }}
{{- end }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
serviceAccountName: {{ include "falcosidekick.fullname" . }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
securityContext:
runAsUser: {{ .Values.podSecurityContext.runAsUser }}
fsGroup: {{ .Values.podSecurityContext.fsGroup }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 2801
protocol: TCP
livenessProbe:
httpGet:
path: /ping
port: http
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /ping
port: http
initialDelaySeconds: 10
periodSeconds: 5
envFrom:
- secretRef:
{{- if .Values.config.existingSecret }}
name: {{ .Values.config.existingSecret }}
{{- else }}
name: {{ include "falcosidekick.fullname" . }}
{{- end }}
env:
- name: DEBUG
value: {{ .Values.config.debug | quote }}
- name: CUSTOMFIELDS
value: {{ .Values.config.customfields | quote }}
- name: MUTUALTLSFILESPATH
value: {{ .Values.config.mutualtlsfilespath | quote }}
{{- if .Values.config.extraEnv }}
{{ toYaml .Values.config.extraEnv | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.extraVolumeMounts }}
volumeMounts:
{{ toYaml .Values.extraVolumeMounts | indent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.extraVolumes }}
volumes:
{{ toYaml .Values.extraVolumes | indent 8 }}
{{- end }}

View File

@ -0,0 +1,56 @@
{{- if and .Values.webui.enabled .Values.webui.ingress.enabled -}}
{{- $fullName := include "falcosidekick.fullname" . -}}
{{- $ingressApiIsStable := eq (include "falcosidekick.ingress.isStable" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "falcosidekick.ingress.supportsPathType" .) "true" -}}
---
apiVersion: {{ include "falcosidekick.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}-ui
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}-ui
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.webui.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.webui.ingress.ingressClassName }}
ingressClassName: {{ .Values.webui.ingress.ingressClassName }}
{{- end }}
{{- if .Values.webui.ingress.tls }}
tls:
{{- range .Values.webui.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.webui.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if $ingressSupportsPathType }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}-ui
port:
name: http
{{- else }}
serviceName: {{ $fullName }}-ui
servicePort: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,56 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "falcosidekick.fullname" . -}}
{{- $ingressApiIsStable := eq (include "falcosidekick.ingress.isStable" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "falcosidekick.ingress.supportsPathType" .) "true" -}}
---
apiVersion: {{ include "falcosidekick.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if $ingressSupportsPathType }}
pathType: {{ default "ImplementationSpecific" .pathType }}
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
name: http
{{- else }}
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if .Values.podSecurityPolicy.create}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "falcosidekick.fullname" . }}
labels:
app: {{ template "falcosidekick.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
fsGroup:
ranges:
- max: 65535
min: 1
rule: MustRunAs
runAsUser:
rule: MustRunAsNonRoot
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65535
min: 1
rule: MustRunAs
volumes:
- configMap
- secret
{{- end }}

View File

@ -0,0 +1,41 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "falcosidekick.fullname" . }}-ui
subjects:
- kind: ServiceAccount
name: {{ include "falcosidekick.fullname" . }}-ui

View File

@ -0,0 +1,104 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- if .Values.config.aws.rolearn }}
annotations:
eks.amazonaws.com/role-arn: {{ .Values.config.aws.rolearn }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
{{- if .Values.podSecurityPolicy.create }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "falcosidekick.fullname" . }}
verbs:
- use
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "falcosidekick.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "falcosidekick.fullname" . }}
{{- if .Values.config.policyreport.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules:
- apiGroups:
- "wgpolicyk8s.io"
resources:
- policyreports
- clusterpolicyreports
verbs:
- get
- create
- delete
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "falcosidekick.fullname" . }}
subjects:
- kind: ServiceAccount
namespace: {{ .Release.Namespace }}
name: {{ include "falcosidekick.fullname" . }}
{{- end }}

View File

@ -0,0 +1,296 @@
{{- if eq .Values.config.existingSecret "" }}
{{- $fullName := include "falcosidekick.fullname" . -}}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
type: Opaque
data:
# Slack Output
SLACK_WEBHOOKURL: "{{ .Values.config.slack.webhookurl | b64enc }}"
SLACK_OUTPUTFORMAT: "{{ .Values.config.slack.outputformat | b64enc }}"
SLACK_FOOTER: "{{ .Values.config.slack.footer | b64enc }}"
SLACK_ICON: "{{ .Values.config.slack.icon | b64enc }}"
SLACK_USERNAME: "{{ .Values.config.slack.username | b64enc }}"
SLACK_MINIMUMPRIORITY: "{{ .Values.config.slack.minimumpriority | b64enc }}"
SLACK_MESSAGEFORMAT: "{{ .Values.config.slack.messageformat | b64enc }}"
# RocketChat Output
ROCKETCHAT_WEBHOOKURL: "{{ .Values.config.rocketchat.webhookurl | b64enc }}"
ROCKETCHAT_OUTPUTFORMAT: "{{ .Values.config.rocketchat.outputformat | b64enc }}"
ROCKETCHAT_ICON: "{{ .Values.config.rocketchat.icon | b64enc }}"
ROCKETCHAT_USERNAME: "{{ .Values.config.rocketchat.username | b64enc }}"
ROCKETCHAT_MINIMUMPRIORITY: "{{ .Values.config.rocketchat.minimumpriority | b64enc }}"
ROCKETCHAT_MESSAGEFORMAT: "{{ .Values.config.rocketchat.messageformat | b64enc }}"
ROCKETCHAT_MUTUALTLS: "{{ .Values.config.rocketchat.mutualtls | printf "%t" | b64enc }}"
ROCKETCHAT_CHECKCERT: "{{ .Values.config.rocketchat.checkcert | printf "%t" | b64enc }}"
# Mattermost Output
MATTERMOST_WEBHOOKURL: "{{ .Values.config.mattermost.webhookurl | b64enc }}"
MATTERMOST_OUTPUTFORMAT: "{{ .Values.config.mattermost.outputformat | b64enc }}"
MATTERMOST_FOOTER: "{{ .Values.config.mattermost.footer | b64enc }}"
MATTERMOST_ICON: "{{ .Values.config.mattermost.icon | b64enc }}"
MATTERMOST_USERNAME: "{{ .Values.config.mattermost.username | b64enc }}"
MATTERMOST_MINIMUMPRIORITY: "{{ .Values.config.mattermost.minimumpriority | b64enc }}"
MATTERMOST_MESSAGEFORMAT: "{{ .Values.config.mattermost.messageformat | b64enc }}"
MATTERMOST_MUTUALTLS: "{{ .Values.config.mattermost.mutualtls | printf "%t" | b64enc }}"
MATTERMOST_CHECKCERT: "{{ .Values.config.mattermost.checkcert | printf "%t" | b64enc }}"
# Teams Output
TEAMS_WEBHOOKURL: "{{ .Values.config.teams.webhookurl | b64enc }}"
TEAMS_OUTPUTFORMAT: "{{ .Values.config.teams.outputformat | b64enc }}"
TEAMS_ACTIVITYIMAGE: "{{ .Values.config.teams.activityimage | b64enc }}"
TEAMS_MINIMUMPRIORITY: "{{ .Values.config.teams.minimumpriority | b64enc }}"
# Datadog Output
DATADOG_APIKEY: "{{ .Values.config.datadog.apikey | b64enc }}"
DATADOG_HOST: "{{ .Values.config.datadog.host | b64enc }}"
DATADOG_MINIMUMPRIORITY: "{{ .Values.config.datadog.minimumpriority | b64enc }}"
# AlertManager Output
ALERTMANAGER_HOSTPORT: "{{ .Values.config.alertmanager.hostport | b64enc }}"
ALERTMANAGER_ENDPOINT: "{{ .Values.config.alertmanager.endpoint | b64enc }}"
ALERTMANAGER_MINIMUMPRIORITY: "{{ .Values.config.alertmanager.minimumpriority | b64enc }}"
ALERTMANAGER_MUTUALTLS: "{{ .Values.config.alertmanager.mutualtls | printf "%t" | b64enc }}"
ALERTMANAGER_CHECKCERT: "{{ .Values.config.alertmanager.checkcert | printf "%t" | b64enc }}"
# InfluxDB Output
INFLUXDB_USER: "{{ .Values.config.influxdb.user | b64enc }}"
INFLUXDB_PASSWORD: "{{ .Values.config.influxdb.password | b64enc }}"
INFLUXDB_HOSTPORT: "{{ .Values.config.influxdb.hostport | b64enc }}"
INFLUXDB_MINIMUMPRIORITY: "{{ .Values.config.influxdb.minimumpriority | b64enc }}"
INFLUXDB_DATABASE: "{{ .Values.config.influxdb.database | b64enc }}"
INFLUXDB_MUTUALTLS: "{{ .Values.config.influxdb.mutualtls | printf "%t" | b64enc }}"
INFLUXDB_CHECKCERT: "{{ .Values.config.influxdb.checkcert | printf "%t" | b64enc }}"
# AWS Output
AWS_ACCESSKEYID: "{{ .Values.config.aws.accesskeyid | b64enc }}"
AWS_SECRETACCESSKEY: "{{ .Values.config.aws.secretaccesskey | b64enc }}"
AWS_REGION: "{{ .Values.config.aws.region | b64enc }}"
AWS_LAMBDA_FUNCTIONNAME: "{{ .Values.config.aws.lambda.functionname | b64enc }}"
AWS_LAMBDA_MINIMUMPRIORITY: "{{ .Values.config.aws.lambda.minimumpriority | b64enc }}"
AWS_CLOUDWATCHLOGS_LOGGROUP: "{{ .Values.config.aws.cloudwatchlogs.loggroup | b64enc }}"
AWS_CLOUDWATCHLOGS_LOGSTREAM: "{{ .Values.config.aws.cloudwatchlogs.logstream | b64enc }}"
AWS_CLOUDWATCHLOGS_MINIMUMPRIORITY: "{{ .Values.config.aws.cloudwatchlogs.minimumpriority | b64enc }}"
AWS_SNS_TOPICARN: "{{ .Values.config.aws.sns.topicarn | b64enc }}"
AWS_SNS_RAWJSON: "{{ .Values.config.aws.sns.rawjson | printf "%t" | b64enc }}"
AWS_SNS_MINIMUMPRIORITY: "{{ .Values.config.aws.sns.minimumpriority | b64enc }}"
AWS_SQS_URL: "{{ .Values.config.aws.sqs.url | b64enc }}"
AWS_SQS_MINIMUMPRIORITY: "{{ .Values.config.aws.sqs.minimumpriority | b64enc }}"
AWS_S3_BUCKET: "{{ .Values.config.aws.s3.bucket | b64enc }}"
AWS_S3_PREFIX: "{{ .Values.config.aws.s3.prefix | b64enc }}"
AWS_S3_MINIMUMPRIORITY: "{{ .Values.config.aws.s3.minimumpriority | b64enc }}"
AWS_KINESIS_STREAMNAME: "{{ .Values.config.aws.kinesis.streamname | b64enc }}"
AWS_KINESIS_MINIMUMPRIORITY: "{{ .Values.config.aws.kinesis.minimumpriority | b64enc }}"
# SMTP Output
SMTP_USER: "{{ .Values.config.smtp.user | b64enc }}"
SMTP_PASSWORD: "{{ .Values.config.smtp.password | b64enc }}"
SMTP_HOSTPORT: "{{ .Values.config.smtp.hostport | b64enc }}"
SMTP_FROM: "{{ .Values.config.smtp.from | b64enc }}"
SMTP_TO: "{{ .Values.config.smtp.to | b64enc }}"
SMTP_OUTPUTFORMAT: "{{ .Values.config.smtp.outputformat | b64enc }}"
SMTP_MINIMUMPRIORITY: "{{ .Values.config.smtp.minimumpriority | b64enc }}"
# OpsGenie Output
OPSGENIE_APIKEY: "{{ .Values.config.opsgenie.apikey | b64enc }}"
OPSGENIE_REGION: "{{ .Values.config.opsgenie.region | b64enc }}"
OPSGENIE_MINIMUMPRIORITY: "{{ .Values.config.opsgenie.minimumpriority | b64enc }}"
OPSGENIE_MUTUALTLS: "{{ .Values.config.opsgenie.mutualtls | printf "%t" | b64enc }}"
OPSGENIE_CHECKCERT: "{{ .Values.config.opsgenie.checkcert | printf "%t" | b64enc }}"
# Discord Output
DISCORD_WEBHOOKURL: "{{ .Values.config.discord.webhookurl | b64enc }}"
DISCORD_ICON: "{{ .Values.config.discord.icon | b64enc }}"
DISCORD_MINIMUMPRIORITY: "{{ .Values.config.discord.minimumpriority | b64enc }}"
# GCP Output
GCP_CREDENTIALS: "{{ .Values.config.gcp.credentials | b64enc }}"
GCP_PUBSUB_PROJECTID: "{{ .Values.config.gcp.pubsub.projectid | b64enc }}"
GCP_PUBSUB_TOPIC: "{{ .Values.config.gcp.pubsub.topic | b64enc }}"
GCP_PUBSUB_MINIMUMPRIORITY: "{{ .Values.config.gcp.pubsub.minimumpriority | b64enc }}"
GCP_STORAGE_BUCKET: "{{ .Values.config.gcp.storage.bucket | b64enc }}"
GCP_STORAGE_PREFIX: "{{ .Values.config.gcp.storage.prefix | b64enc }}"
GCP_STORAGE_MINIMUMPRIORITY: "{{ .Values.config.gcp.storage.minimumpriority | b64enc }}"
GCP_CLOUDFUNCTIONS_NAME: "{{ .Values.config.gcp.cloudfunctions.name | b64enc }}"
GCP_CLOUDFUNCTIONS_MINIMUMPRIORITY: "{{ .Values.config.gcp.cloudfunctions.minimumpriority | b64enc }}"
GCP_CLOUDRUN_ENDPOINT: "{{ .Values.config.gcp.cloudrun.endpoint | b64enc }}"
GCP_CLOUDRUN_JWT: "{{ .Values.config.gcp.cloudrun.jwt | b64enc }}"
GCP_CLOUDRUN_MINIMUMPRIORITY: "{{ .Values.config.gcp.cloudrun.minimumpriority | b64enc }}"
# GoogleChat Output
GOOGLECHAT_WEBHOOKURL: "{{ .Values.config.googlechat.webhookurl | b64enc }}"
GOOGLECHAT_OUTPUTFORMAT: "{{ .Values.config.googlechat.outputformat | b64enc }}"
GOOGLECHAT_MINIMUMPRIORITY: "{{ .Values.config.googlechat.minimumpriority | b64enc }}"
GOOGLECHAT_MESSAGEFORMAT: "{{ .Values.config.googlechat.messageformat | b64enc }}"
# ElasticSearch Output
ELASTICSEARCH_HOSTPORT: "{{ .Values.config.elasticsearch.hostport | b64enc }}"
ELASTICSEARCH_INDEX: "{{ .Values.config.elasticsearch.index | b64enc }}"
ELASTICSEARCH_TYPE: "{{ .Values.config.elasticsearch.type | b64enc }}"
ELASTICSEARCH_MINIMUMPRIORITY: "{{ .Values.config.elasticsearch.minimumpriority | b64enc }}"
ELASTICSEARCH_MUTUALTLS: "{{ .Values.config.elasticsearch.mutualtls | printf "%t" | b64enc }}"
ELASTICSEARCH_CHECKCERT: "{{ .Values.config.elasticsearch.checkcert | printf "%t" | b64enc }}"
ELASTICSEARCH_USERNAME: "{{ .Values.config.elasticsearch.username | b64enc }}"
ELASTICSEARCH_PASSWORD: "{{ .Values.config.elasticsearch.password | b64enc }}"
# Loki Output
LOKI_HOSTPORT: "{{ .Values.config.loki.hostport | b64enc }}"
LOKI_ENDPOINT: "{{ .Values.config.loki.endpoint | b64enc }}"
LOKI_TENANT: "{{ .Values.config.loki.tenant | b64enc }}"
LOKI_MINIMUMPRIORITY: "{{ .Values.config.loki.minimumpriority | b64enc }}"
LOKI_MUTUALTLS: "{{ .Values.config.loki.mutualtls | printf "%t" | b64enc }}"
LOKI_CHECKCERT: "{{ .Values.config.loki.checkcert | printf "%t" | b64enc }}"
# Nats Output
NATS_HOSTPORT: "{{ .Values.config.nats.hostport | b64enc }}"
NATS_MINIMUMPRIORITY: "{{ .Values.config.nats.minimumpriority | b64enc }}"
NATS_MUTUALTLS: "{{ .Values.config.nats.mutualtls | printf "%t" | b64enc }}"
NATS_CHECKCERT: "{{ .Values.config.nats.checkcert | printf "%t" | b64enc }}"
# Stan Output
STAN_HOSTPORT: "{{ .Values.config.stan.hostport | b64enc }}"
STAN_CLUSTERID: "{{ .Values.config.stan.clusterid | b64enc }}"
STAN_CLIENTID: "{{ .Values.config.stan.clientid | b64enc }}"
STAN_MINIMUMPRIORITY: "{{ .Values.config.stan.minimumpriority | b64enc }}"
STAN_MUTUALTLS: "{{ .Values.config.stan.mutualtls | printf "%t" | b64enc }}"
STAN_CHECKCERT: "{{ .Values.config.stan.checkcert | printf "%t" | b64enc }}"
# Statsd
STATSD_FORWARDER: "{{ .Values.config.statsd.forwarder | b64enc }}"
STATSD_NAMESPACE: "{{ .Values.config.statsd.namespace | b64enc }}"
# Dogstatsd
DOGSTATSD_FORWARDER: "{{ .Values.config.dogstatsd.forwarder | b64enc }}"
DOGSTATSD_NAMESPACE: "{{ .Values.config.dogstatsd.namespace | b64enc }}"
DOGSTATSD_TAGS: "{{ .Values.config.dogstatsd.tags | b64enc }}"
# WebHook Output
WEBHOOK_ADDRESS: "{{ .Values.config.webhook.address | b64enc }}"
WEBHOOK_CUSTOMHEADERS: "{{ .Values.config.webhook.customHeaders | b64enc }}"
WEBHOOK_MINIMUMPRIORITY: "{{ .Values.config.webhook.minimumpriority | b64enc }}"
WEBHOOK_MUTUALTLS: "{{ .Values.config.webhook.mutualtls | printf "%t" | b64enc }}"
WEBHOOK_CHECKCERT: "{{ .Values.config.webhook.checkcert | printf "%t" | b64enc }}"
# Azure Output
AZURE_EVENTHUB_NAME: "{{ .Values.config.azure.eventHub.name | b64enc }}"
AZURE_EVENTHUB_NAMESPACE: "{{ .Values.config.azure.eventHub.namespace | b64enc }}"
AZURE_EVENTHUB_MINIMUMPRIORITY: "{{ .Values.config.azure.eventHub.minimumpriority | b64enc }}"
# Kafka Output
KAFKA_HOSTPORT: "{{ .Values.config.kafka.hostport | b64enc }}"
KAFKA_TOPIC: "{{ .Values.config.kafka.topic | b64enc }}"
KAFKA_PARTITION: "{{ .Values.config.kafka.partition | b64enc }}"
KAFKA_MINIMUMPRIORITY: "{{ .Values.config.kafka.minimumpriority | b64enc }}"
# PagerDuty Output
PAGERDUTY_ROUTINGKEY: "{{ .Values.config.pagerduty.routingkey | b64enc }}"
PAGERDUTY_MINIMUMPRIORITY: "{{ .Values.config.pagerduty.minimumpriority | b64enc }}"
# Kubeless Output
KUBELESS_FUNCTION: "{{ .Values.config.kubeless.function | b64enc }}"
KUBELESS_NAMESPACE: "{{ .Values.config.kubeless.namespace | b64enc }}"
KUBELESS_PORT: "{{ .Values.config.kubeless.port | toString | b64enc }}"
KUBELESS_MINIMUMPRIORITY: "{{ .Values.config.kubeless.minimumpriority | b64enc }}"
KUBELESS_MUTUALTLS: "{{ .Values.config.kubeless.mutualtls | printf "%t" | b64enc }}"
KUBELESS_CHECKCERT: "{{ .Values.config.kubeless.checkcert | printf "%t" | b64enc }}"
# OpenFaaS
OPENFAAS_GATEWAYNAMESPACE: "{{ .Values.config.openfaas.gatewaynamespace | b64enc }}"
OPENFAAS_GATEWAYSERVICE: "{{ .Values.config.openfaas.gatewayservice | b64enc }}"
OPENFAAS_FUNCTIONNAME: "{{ .Values.config.openfaas.functionname | b64enc }}"
OPENFAAS_FUNCTIONNAMESPACE: "{{ .Values.config.openfaas.functionnamespace | b64enc }}"
OPENFAAS_GATEWAYPORT: "{{ .Values.config.openfaas.gatewayport | toString | b64enc }}"
OPENFAAS_MINIMUMPRIORITY: "{{ .Values.config.openfaas.minimumpriority | b64enc }}"
OPENFAAS_MUTUALTLS: "{{ .Values.config.openfaas.mutualtls | printf "%t" | b64enc }}"
OPENFAAS_CHECKCERT: "{{ .Values.config.openfaas.checkcert | printf "%t" | b64enc }}"
# Cloud Events Output
CLOUDEVENTS_ADDRESS: "{{ .Values.config.cloudevents.address | b64enc }}"
CLOUDEVENTS_EXTENSION: "{{ .Values.config.cloudevents.extension | b64enc }}"
CLOUDEVENTS_MINIMUMPRIORITY: "{{ .Values.config.cloudevents.minimumpriority | b64enc }}"
# RabbitMQ Output
RABBITMQ_URL: "{{ .Values.config.rabbitmq.url | b64enc}}"
RABBITMQ_QUEUE: "{{ .Values.config.rabbitmq.queue | b64enc}}"
RABBITMQ_MINIMUMPRIORITY: "{{ .Values.config.rabbitmq.minimumpriority | b64enc}}"
# Wavefront Output
WAVEFRONT_ENDPOINTTYPE: "{{ .Values.config.wavefront.endpointtype | b64enc}}"
WAVEFRONT_ENDPOINTHOST: "{{ .Values.config.wavefront.endpointhost | b64enc}}"
WAVEFRONT_ENDPOINTTOKEN: "{{ .Values.config.wavefront.endpointtoken | b64enc}}"
WAVEFRONT_ENDPOINTMETRICPORT: "{{ .Values.config.wavefront.endpointmetricport | toString | b64enc}}"
WAVEFRONT_FLUSHINTERVALSECONDS: "{{ .Values.config.wavefront.flushintervalseconds | toString | b64enc}}"
WAVEFRONT_BATCHSIZE: "{{ .Values.config.wavefront.batchsize | toString | b64enc}}"
WAVEFRONT_METRICNAME: "{{ .Values.config.wavefront.metricname | b64enc}}"
WAVEFRONT_MINIMUMPRIORITY: "{{ .Values.config.wavefront.minimumpriority | b64enc}}"
# Grafana Output
GRAFANA_HOSTPORT: "{{ .Values.config.grafana.hostport | b64enc}}"
GRAFANA_APIKEY: "{{ .Values.config.grafana.apikey | b64enc}}"
GRAFANA_DASHBOARDID: "{{ .Values.config.grafana.dashboardid | toString | b64enc}}"
GRAFANA_PANELID: "{{ .Values.config.grafana.panelid | toString | b64enc}}"
GRAFANA_ALLFIELDSASTAGS: "{{ .Values.config.grafana.allfieldsastags | printf "%t" | b64enc}}"
GRAFANA_MUTUALTLS: "{{ .Values.config.grafana.mutualtls | printf "%t" | b64enc}}"
GRAFANA_CHECKCERT: "{{ .Values.config.grafana.checkcert | printf "%t" | b64enc}}"
GRAFANA_MINIMUMPRIORITY: "{{ .Values.config.grafana.minimumpriority | b64enc}}"
# Fission Output
FISSION_FUNCTION: "{{ .Values.config.fission.function | b64enc}}"
FISSION_ROUTERNAMESPACE: "{{ .Values.config.fission.routernamespace | b64enc}}"
FISSION_ROUTERSERVICE: "{{ .Values.config.fission.routerservice | b64enc}}"
FISSION_ROUTERPORT: "{{ .Values.config.fission.routerport | toString | b64enc}}"
  FISSION_MINIMUMPRIORITY: "{{ .Values.config.fission.minimumpriority | b64enc}}"
FISSION_MUTUALTLS: "{{ .Values.config.fission.mutualtls | printf "%t" | b64enc}}"
FISSION_CHECKCERT: "{{ .Values.config.fission.checkcert | printf "%t" | b64enc}}"
# Yandex Output
YANDEX_ACCESSKEYID: "{{ .Values.config.yandex.accesskeyid | b64enc}}"
YANDEX_SECRETACCESSKEY: "{{ .Values.config.yandex.secretaccesskey | b64enc}}"
YANDEX_REGION: "{{ .Values.config.yandex.region | b64enc}}"
YANDEX_S3_ENDPOINT: "{{ .Values.config.yandex.s3.endpoint | b64enc}}"
YANDEX_S3_BUCKET: "{{ .Values.config.yandex.s3.bucket | b64enc}}"
YANDEX_S3_PREFIX: "{{ .Values.config.yandex.s3.prefix | b64enc}}"
YANDEX_S3_MINIMUMPRIORITY: "{{ .Values.config.yandex.s3.minimumpriority | b64enc}}"
# KafkaRest Output
KAFKAREST_ADDRESS: "{{ .Values.config.kafkarest.address | b64enc}}"
KAFKAREST_VERSION: "{{ .Values.config.kafkarest.version | toString | b64enc}}"
  KAFKAREST_MINIMUMPRIORITY: "{{ .Values.config.kafkarest.minimumpriority | b64enc}}"
  KAFKAREST_MUTUALTLS: "{{ .Values.config.kafkarest.mutualtls | printf "%t" | b64enc}}"
  KAFKAREST_CHECKCERT: "{{ .Values.config.kafkarest.checkcert | printf "%t" | b64enc}}"
# Syslog
SYSLOG_HOST: "{{ .Values.config.syslog.host | b64enc}}"
  SYSLOG_PORT: "{{ .Values.config.syslog.port | toString | b64enc}}"
  SYSLOG_PROTOCOL: "{{ .Values.config.syslog.protocol | b64enc}}"
  SYSLOG_MINIMUMPRIORITY: "{{ .Values.config.syslog.minimumpriority | b64enc}}"
# Zoho Cliq
CLIQ_WEBHOOKURL: "{{ .Values.config.cliq.webhookurl | b64enc}}"
CLIQ_ICON: "{{ .Values.config.cliq.icon | b64enc}}"
CLIQ_USEEMOJI: "{{ .Values.config.cliq.useemoji | printf "%t" | b64enc}}"
CLIQ_OUTPUTFORMAT: "{{ .Values.config.cliq.outputformat | b64enc}}"
CLIQ_MESSAGEFORMAT: "{{ .Values.config.cliq.messageformat | b64enc}}"
  CLIQ_MINIMUMPRIORITY: "{{ .Values.config.cliq.minimumpriority | b64enc}}"
# Policy Reporter
  POLICYREPORT_ENABLED: "{{ .Values.config.policyreport.enabled | printf "%t" | b64enc}}"
POLICYREPORT_KUBECONFIG: "{{ .Values.config.policyreport.kubeconfig | b64enc}}"
POLICYREPORT_MAXEVENTS: "{{ .Values.config.policyreport.maxevents | toString | b64enc}}"
POLICYREPORT_PRUNEBYPRIORITY: "{{ .Values.config.policyreport.prunebypriority | printf "%t" | b64enc}}"
  POLICYREPORT_MINIMUMPRIORITY: "{{ .Values.config.policyreport.minimumpriority | b64enc}}"
# WebUI Output
{{- if .Values.webui.enabled -}}
{{ $weburl := printf "http://%s-ui:2802" (include "falcosidekick.fullname" .) }}
WEBUI_URL: "{{ $weburl | b64enc }}"
{{- end }}
{{- end }}

View File

@ -0,0 +1,55 @@
{{- if .Values.webui.enabled -}}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}-ui
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.webui.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.webui.service.type }}
ports:
- port: {{ .Values.webui.service.port }}
    {{- if eq .Values.webui.service.type "NodePort" }}
      nodePort: {{ .Values.webui.service.nodePort }}
    {{- end }}
targetPort: {{ .Values.webui.service.targetPort }}
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui
app.kubernetes.io/instance: {{ .Release.Name }}-ui
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "falcosidekick.fullname" . }}-ui-redis
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui-redis
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}-ui
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.webui.redis.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.webui.redis.service.port }}
targetPort: {{ .Values.webui.redis.service.targetPort }}
protocol: TCP
name: redis
selector:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}-ui-redis
app.kubernetes.io/instance: {{ .Release.Name }}-ui-redis
{{- end }}

View File

@ -0,0 +1,25 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "falcosidekick.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@ -0,0 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "falcosidekick.fullname" . }}-test-connection"
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ include "falcosidekick.name" . }}
helm.sh/chart: {{ include "falcosidekick.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: curl
image: appropriate/curl
command: ['curl']
args: ["-X", "POST", '{{ include "falcosidekick.fullname" . }}:{{ .Values.service.port }}/ping']
restartPolicy: Never
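# A usage sketch: after installing the chart, running `helm test <release-name>`
# launches this pod; the test succeeds if the POST to the /ping endpoint
# returns successfully (curl exits 0).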

View File

@ -0,0 +1,488 @@
# Default values for falcosidekick.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 2
image:
registry: docker.io
repository: falcosecurity/falcosidekick
tag: 2.25.0
pullPolicy: IfNotPresent
podSecurityContext:
runAsUser: 1234
fsGroup: 1234
# One or more secrets to be used when pulling images
imagePullSecrets: []
# - registrySecretName
nameOverride: ""
fullnameOverride: ""
podSecurityPolicy:
create: false
priorityClassName: ""
podLabels: {}
podAnnotations: {}
config:
existingSecret: ""
extraEnv: []
debug: false
##
  ## A list of escaped comma-separated custom fields to add to falco events; syntax is "key:value\,key:value"
customfields: ""
  mutualtlsfilespath: "/etc/certs" # folder which will be used to store client.crt, client.key and ca.crt files for mutual tls (default: "/etc/certs")
slack:
webhookurl: ""
footer: ""
icon: ""
username: ""
outputformat: "all"
minimumpriority: ""
messageformat: ""
rocketchat:
webhookurl: ""
icon: ""
username: ""
outputformat: "all"
minimumpriority: ""
messageformat: ""
mutualtls: false
checkcert: true
mattermost:
webhookurl: ""
footer: ""
icon: ""
username: ""
outputformat: "all"
minimumpriority: ""
messageformat: ""
mutualtls: false
checkcert: true
teams:
webhookurl: ""
activityimage: ""
outputformat: "all"
minimumpriority: ""
datadog:
apikey: ""
minimumpriority: ""
host: ""
alertmanager:
hostport: ""
endpoint: "/api/v1/alerts"
minimumpriority: ""
mutualtls: false
checkcert: true
elasticsearch:
hostport: ""
index: "falco"
type: "event"
minimumpriority: ""
mutualtls: false
checkcert: true
username: ""
password: ""
influxdb:
hostport: ""
database: "falco"
user: ""
password: ""
minimumpriority: ""
mutualtls: false
checkcert: true
loki:
hostport: ""
endpoint: "/api/prom/push"
tenant: ""
minimumpriority: ""
mutualtls: false
checkcert: true
nats:
hostport: ""
minimumpriority: ""
mutualtls: false
checkcert: true
stan:
hostport: ""
clusterid: ""
clientid: ""
minimumpriority: ""
mutualtls: false
checkcert: true
aws:
rolearn: ""
accesskeyid: ""
secretaccesskey: ""
region: ""
cloudwatchlogs:
loggroup: ""
logstream: ""
minimumpriority: ""
lambda:
functionname: ""
minimumpriority: ""
sns:
topicarn: ""
rawjson: false
minimumpriority: ""
sqs:
url: ""
minimumpriority: ""
s3:
bucket: ""
prefix: ""
minimumpriority: ""
kinesis:
streamname: ""
minimumpriority: ""
smtp:
hostport: ""
user: ""
password: ""
from: ""
to: ""
outputformat: "html"
minimumpriority: ""
opsgenie:
apikey: ""
region: ""
minimumpriority: ""
mutualtls: false
checkcert: true
statsd:
forwarder: ""
namespace: "falcosidekick."
dogstatsd:
forwarder: ""
namespace: "falcosidekick."
tags: ""
webhook:
address: ""
customHeaders: "" # a list of comma separated custom headers to add, syntax is "key:value\,key:value"
minimumpriority: ""
mutualtls: false
checkcert: true
azure:
subscriptionID: ""
resourceGroupName: ""
podIdentityClientID: ""
podIdentityName: ""
eventHub:
namespace: ""
name: ""
minimumpriority: ""
discord:
webhookurl: ""
icon: ""
minimumpriority: ""
gcp:
credentials: "" # The base64-encoded JSON key file for the GCP service account
pubsub:
projectid: "" # The GCP Project ID containing the Pub/Sub Topic
topic: "" # The name of the Pub/Sub topic
minimumpriority: ""
storage:
prefix: ""
bucket: ""
minimumpriority: "debug"
cloudfunctions:
name: "" # The name of the Cloud Function name
minimumpriority: ""
cloudrun:
endpoint: "" # the URL of the Cloud Run function
jwt: "" # JWT for the private access to Cloud Run function
minimumpriority: ""
googlechat:
webhookurl: ""
outputformat: "all"
minimumpriority: ""
messageformat: ""
kafka:
hostport: ""
topic: ""
partition: "0"
messageformat: ""
minimumpriority: ""
pagerduty:
routingkey: ""
minimumpriority: ""
kubeless:
function: ""
namespace: ""
port: 8080
minimumpriority: ""
mutualtls: false
checkcert: true
openfaas:
functionname: ""
functionnamespace: "openfaas-fn"
gatewayservice: "gateway"
gatewayport: 8080
gatewaynamespace: "openfaas"
minimumpriority: ""
mutualtls: false
checkcert: true
cloudevents:
address: ""
extension: ""
minimumpriority: ""
rabbitmq:
url: ""
queue: ""
minimumpriority: "debug"
wavefront:
endpointtype: "" # Wavefront endpoint type, must be 'direct' or 'proxy'. If not empty, with endpointhost, Wavefront output is enabled
endpointhost: "" # Wavefront endpoint address (only the host). If not empty, with endpointhost, Wavefront output is enabled
endpointtoken: "" # Wavefront token. Must be used only when endpointtype is 'direct'
endpointmetricport: 2878 # Wavefront endpoint port when type is 'proxy'
metricname: "falco.alert" # Metric to be created in Wavefront. Defaults to falco.alert
    batchsize: 10000 # Max batch of data sent per flush interval. Defaults to 10,000. Used only in direct mode
flushintervalseconds: 1 # Time in seconds between flushing metrics to Wavefront. Defaults to 1s
minimumpriority: "debug" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
grafana:
hostport: "" # http://{domain or ip}:{port}, if not empty, Grafana output is enabled
apikey: "" # API Key to authenticate to Grafana, if not empty, Grafana output is enabled
dashboardid: "" # annotations are scoped to a specific dashboard. Optionnal.
panelid: "" # annotations are scoped to a specific panel. Optionnal.
allfieldsastags: false # if true, all custom fields are added as tags (default: false)
mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
checkcert: true # check if ssl certificate of the output is valid (default: true)
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
fission:
function: "" # Name of Fission function, if not empty, Fission is enabled
routernamespace: "fission" # Namespace of Fission Router, "fission" (default)
routerservice: "router" # Service of Fission Router, "router" (default)
routerport: 80 # Port of service of Fission Router
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
checkcert: true # check if ssl certificate of the output is valid (default: true)
mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
yandex:
accesskeyid: "" # yandex access key
secretaccesskey: "" # yandex secret access key
region: "" # yandex storage region (default: ru-central-1)
s3:
endpoint: "" # yandex storage endpoint (default: https://storage.yandexcloud.net)
bucket: "" # Yandex storage, bucket name
prefix: "" # name of prefix, keys will have format: s3://<bucket>/<prefix>/YYYY-MM-DD/YYYY-MM-DDTHH:mm:ss.s+01:00.json
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|erro
kafkarest:
address: "" # The full URL to the topic (example "http://kafkarest:8082/topics/test")
version: 2 # Kafka Rest Proxy API version 2|1 (default: 2)
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
mutualtls: false # if true, checkcert flag will be ignored (server cert will always be checked)
checkcert: true # check if ssl certificate of the output is valid (default: true)
syslog:
host: ""
port: ""
protocol: "tcp"
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
cliq:
webhookurl: ""
icon: ""
useemoji: true
outputformat: "all"
messageformat: ""
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
policyreport:
enabled: false
kubeconfig: "~/.kube/config"
maxevents: 1000
prunebypriority: false
minimumpriority: "" # minimum priority of event for using this output, order is emergency|alert|critical|error|warning|notice|informational|debug or "" (default)
service:
type: ClusterIP
port: 2801
annotations: {}
# networking.gke.io/load-balancer-type: Internal
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: falcosidekick.local
paths:
- path: /
        # -- pathType (e.g. ImplementationSpecific, Prefix, etc.)
# pathType: Prefix
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
extraVolumes: []
# - name: optional-mtls-volume
# configMap:
# name: falco-certs-optional
# optional: true
# items:
# - key: mtlscert.optional.tls
# path: mtlscert.optional.tls
extraVolumeMounts: []
# - mountPath: /etc/certs/mtlscert.optional.tls
# name: optional-mtls-volume
webui:
enabled: false
replicaCount: 2
image:
registry: docker.io
repository: falcosecurity/falcosidekick-ui
tag: "v2.0.2"
pullPolicy: IfNotPresent
podSecurityContext:
runAsUser: 1234
fsGroup: 1234
priorityClassName: ""
podLabels: {}
podAnnotations: {}
service:
# type: LoadBalancer
type: ClusterIP
port: 2802
nodePort: 30282
targetPort: 2802
annotations: {}
# service.beta.kubernetes.io/aws-load-balancer-internal: "true"
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: falcosidekick-ui.local
paths:
- path: /
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
redis:
image:
registry: docker.io
repository: redislabs/redisearch
tag: "2.2.4"
pullPolicy: IfNotPresent
priorityClassName: ""
podLabels: {}
podAnnotations: {}
storageSize: "1Gi"
storageClass: ""
service:
# type: LoadBalancer
type: ClusterIP
port: 6379
targetPort: 6379
annotations: {}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -0,0 +1,5 @@
# CI values for Falco.
# To deploy Falco on CI we need to set an argument that bypasses the
# installation of the kernel module.
extraArgs:
- --userspace
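# The --userspace flag switches Falco to userspace instrumentation, so no
# kernel module needs to be loaded on the CI runner.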

View File

@ -0,0 +1,188 @@
#
# Copyright (C) 2019 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- required_engine_version: 2
################################################################
# By default all application-related rules are disabled for
# performance reasons. Depending on the application(s) you use,
# uncomment the corresponding rule definitions for
# application-specific activity monitoring.
################################################################
# Elasticsearch ports
- macro: elasticsearch_cluster_port
condition: fd.sport=9300
- macro: elasticsearch_api_port
condition: fd.sport=9200
- macro: elasticsearch_port
condition: elasticsearch_cluster_port or elasticsearch_api_port
# - rule: Elasticsearch unexpected network inbound traffic
# desc: inbound network traffic to elasticsearch on a port other than the standard ports
# condition: user.name = elasticsearch and inbound and not elasticsearch_port
# output: "Inbound network traffic to Elasticsearch on unexpected port (connection=%fd.name)"
# priority: WARNING
# - rule: Elasticsearch unexpected network outbound traffic
# desc: outbound network traffic from elasticsearch on a port other than the standard ports
# condition: user.name = elasticsearch and outbound and not elasticsearch_cluster_port
# output: "Outbound network traffic from Elasticsearch on unexpected port (connection=%fd.name)"
# priority: WARNING
# ActiveMQ ports
- macro: activemq_cluster_port
condition: fd.sport=61616
- macro: activemq_web_port
condition: fd.sport=8161
- macro: activemq_port
condition: activemq_web_port or activemq_cluster_port
# - rule: Activemq unexpected network inbound traffic
# desc: inbound network traffic to activemq on a port other than the standard ports
# condition: user.name = activemq and inbound and not activemq_port
# output: "Inbound network traffic to ActiveMQ on unexpected port (connection=%fd.name)"
# priority: WARNING
# - rule: Activemq unexpected network outbound traffic
# desc: outbound network traffic from activemq on a port other than the standard ports
# condition: user.name = activemq and outbound and not activemq_cluster_port
# output: "Outbound network traffic from ActiveMQ on unexpected port (connection=%fd.name)"
# priority: WARNING
# Cassandra ports
# https://docs.datastax.com/en/cassandra/2.0/cassandra/security/secureFireWall_r.html
- macro: cassandra_thrift_client_port
condition: fd.sport=9160
- macro: cassandra_cql_port
condition: fd.sport=9042
- macro: cassandra_cluster_port
condition: fd.sport=7000
- macro: cassandra_ssl_cluster_port
condition: fd.sport=7001
- macro: cassandra_jmx_port
condition: fd.sport=7199
- macro: cassandra_port
condition: >
cassandra_thrift_client_port or
cassandra_cql_port or cassandra_cluster_port or
cassandra_ssl_cluster_port or cassandra_jmx_port
# - rule: Cassandra unexpected network inbound traffic
# desc: inbound network traffic to cassandra on a port other than the standard ports
# condition: user.name = cassandra and inbound and not cassandra_port
# output: "Inbound network traffic to Cassandra on unexpected port (connection=%fd.name)"
# priority: WARNING
# - rule: Cassandra unexpected network outbound traffic
# desc: outbound network traffic from cassandra on a port other than the standard ports
# condition: user.name = cassandra and outbound and not (cassandra_ssl_cluster_port or cassandra_cluster_port)
# output: "Outbound network traffic from Cassandra on unexpected port (connection=%fd.name)"
# priority: WARNING
# Couchdb ports
# https://github.com/davisp/couchdb/blob/master/etc/couchdb/local.ini
- macro: couchdb_httpd_port
condition: fd.sport=5984
- macro: couchdb_httpd_ssl_port
condition: fd.sport=6984
# xxx can't tell what clustering ports are used; not writing rules for this yet.
# Fluentd ports
- macro: fluentd_http_port
condition: fd.sport=9880
- macro: fluentd_forward_port
condition: fd.sport=24224
# - rule: Fluentd unexpected network inbound traffic
# desc: inbound network traffic to fluentd on a port other than the standard ports
# condition: user.name = td-agent and inbound and not (fluentd_forward_port or fluentd_http_port)
# output: "Inbound network traffic to Fluentd on unexpected port (connection=%fd.name)"
# priority: WARNING
# - rule: Tdagent unexpected network outbound traffic
# desc: outbound network traffic from fluentd on a port other than the standard ports
# condition: user.name = td-agent and outbound and not fluentd_forward_port
# output: "Outbound network traffic from Fluentd on unexpected port (connection=%fd.name)"
# priority: WARNING
# Gearman ports
# http://gearman.org/protocol/
# - rule: Gearman unexpected network outbound traffic
# desc: outbound network traffic from gearman on a port other than the standard ports
# condition: user.name = gearman and outbound and not fd.sport = 4730
# output: "Outbound network traffic from Gearman on unexpected port (connection=%fd.name)"
# priority: WARNING
# Zookeeper
- macro: zookeeper_port
condition: fd.sport = 2181
# Kafka ports
# - rule: Kafka unexpected network inbound traffic
# desc: inbound network traffic to kafka on a port other than the standard ports
# condition: user.name = kafka and inbound and fd.sport != 9092
# output: "Inbound network traffic to Kafka on unexpected port (connection=%fd.name)"
# priority: WARNING
# Memcached ports
# - rule: Memcached unexpected network inbound traffic
# desc: inbound network traffic to memcached on a port other than the standard ports
# condition: user.name = memcached and inbound and fd.sport != 11211
# output: "Inbound network traffic to Memcached on unexpected port (connection=%fd.name)"
# priority: WARNING
# - rule: Memcached unexpected network outbound traffic
# desc: any outbound network traffic from memcached. memcached never initiates outbound connections.
# condition: user.name = memcached and outbound
# output: "Unexpected Memcached outbound connection (connection=%fd.name)"
# priority: WARNING
# MongoDB ports
- macro: mongodb_server_port
condition: fd.sport = 27017
- macro: mongodb_shardserver_port
condition: fd.sport = 27018
- macro: mongodb_configserver_port
condition: fd.sport = 27019
- macro: mongodb_webserver_port
condition: fd.sport = 28017
# - rule: Mongodb unexpected network inbound traffic
# desc: inbound network traffic to mongodb on a port other than the standard ports
# condition: >
# user.name = mongodb and inbound and not (mongodb_server_port or
# mongodb_shardserver_port or mongodb_configserver_port or mongodb_webserver_port)
# output: "Inbound network traffic to MongoDB on unexpected port (connection=%fd.name)"
# priority: WARNING
# MySQL ports
# - rule: Mysql unexpected network inbound traffic
# desc: inbound network traffic to mysql on a port other than the standard ports
# condition: user.name = mysql and inbound and fd.sport != 3306
# output: "Inbound network traffic to MySQL on unexpected port (connection=%fd.name)"
# priority: WARNING
# - rule: HTTP server unexpected network inbound traffic
# desc: inbound network traffic to a http server program on a port other than the standard ports
# condition: proc.name in (http_server_binaries) and inbound and fd.sport != 80 and fd.sport != 443
# output: "Inbound network traffic to HTTP Server on unexpected port (connection=%fd.name)"
# priority: WARNING

View File

@ -0,0 +1,441 @@
#
# Copyright (C) 2022 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# All rules files related to plugins should require engine version 10
- required_engine_version: 10
# These rules can be read by cloudtrail plugin version 0.2.3, or
# anything semver-compatible.
- required_plugin_versions:
- name: cloudtrail
version: 0.2.3
- name: json
version: 0.2.2
# Note that this rule is disabled by default. It's useful only to
# verify that the cloudtrail plugin is sending events properly. The
# very broad condition evt.num > 0 only works because the rule source
# is limited to aws_cloudtrail. This ensures that the only events that
# are matched against the rule are from the cloudtrail plugin (or
# a different plugin with the same source).
- rule: All Cloudtrail Events
desc: Match all cloudtrail events.
condition:
evt.num > 0
output: Some Cloudtrail Event (evtnum=%evt.num info=%evt.plugininfo ts=%evt.time.iso8601 id=%ct.id error=%ct.error)
priority: DEBUG
tags:
- cloud
- aws
source: aws_cloudtrail
enabled: false
- rule: Console Login Through Assume Role
desc: Detect a console login through Assume Role.
condition:
ct.name="ConsoleLogin" and not ct.error exists
and ct.user.identitytype="AssumedRole"
and json.value[/responseElements/ConsoleLogin]="Success"
output:
Detected a console login through Assume Role
(principal=%ct.user.principalid,
assumedRole=%ct.user.arn,
requesting IP=%ct.srcip,
AWS region=%ct.region)
priority: WARNING
tags:
- cloud
- aws
- aws_console
- aws_iam
source: aws_cloudtrail
- rule: Console Login Without MFA
desc: Detect a console login without MFA.
condition:
ct.name="ConsoleLogin" and not ct.error exists
and ct.user.identitytype!="AssumedRole"
and json.value[/responseElements/ConsoleLogin]="Success"
and json.value[/additionalEventData/MFAUsed]="No"
output:
Detected a console login without MFA
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region)
priority: CRITICAL
tags:
- cloud
- aws
- aws_console
- aws_iam
source: aws_cloudtrail
- rule: Console Root Login Without MFA
desc: Detect root console login without MFA.
condition:
ct.name="ConsoleLogin" and not ct.error exists
and json.value[/additionalEventData/MFAUsed]="No"
and ct.user.identitytype!="AssumedRole"
and json.value[/responseElements/ConsoleLogin]="Success"
and ct.user.identitytype="Root"
output:
Detected a root console login without MFA.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region)
priority: CRITICAL
tags:
- cloud
- aws
- aws_console
- aws_iam
source: aws_cloudtrail
- rule: Deactivate MFA for Root User
desc: Detect deactivating MFA configuration for root.
condition:
ct.name="DeactivateMFADevice" and not ct.error exists
and ct.user.identitytype="Root"
and ct.request.username="AWS ROOT USER"
output:
Multi Factor Authentication configuration has been disabled for root
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
MFA serial number=%ct.request.serialnumber)
priority: CRITICAL
tags:
- cloud
- aws
- aws_iam
source: aws_cloudtrail
- rule: Create AWS user
desc: Detect creation of a new AWS user.
condition:
ct.name="CreateUser" and not ct.error exists
output:
A new AWS user has been created
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
new user created=%ct.request.username)
priority: INFO
tags:
- cloud
- aws
- aws_iam
source: aws_cloudtrail
- rule: Create Group
desc: Detect creation of a new user group.
condition:
ct.name="CreateGroup" and not ct.error exists
output:
A new user group has been created.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
group name=%ct.request.groupname)
priority: WARNING
tags:
- cloud
- aws
- aws_iam
source: aws_cloudtrail
- rule: Delete Group
desc: Detect deletion of a user group.
condition:
ct.name="DeleteGroup" and not ct.error exists
output:
A user group has been deleted.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
group name=%ct.request.groupname)
priority: WARNING
tags:
- cloud
- aws
- aws_iam
source: aws_cloudtrail
- rule: ECS Service Created
desc: Detect a new service is created in ECS.
condition:
ct.src="ecs.amazonaws.com" and
ct.name="CreateService" and
not ct.error exists
output:
A new service has been created in ECS
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
cluster=%ct.request.cluster,
service name=%ct.request.servicename,
task definition=%ct.request.taskdefinition)
priority: WARNING
tags:
- cloud
- aws
- aws_ecs
- aws_fargate
source: aws_cloudtrail
- rule: ECS Task Run or Started
desc: Detect a new task is started in ECS.
condition:
ct.src="ecs.amazonaws.com" and
(ct.name="RunTask" or ct.name="StartTask") and
not ct.error exists
output:
A new task has been started in ECS
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
cluster=%ct.request.cluster,
task definition=%ct.request.taskdefinition)
priority: WARNING
tags:
- cloud
- aws
- aws_ecs
- aws_fargate
source: aws_cloudtrail
- rule: Create Lambda Function
desc: Detect creation of a Lambda function.
condition:
ct.name="CreateFunction20150331" and not ct.error exists
output:
Lambda function has been created.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
lambda function=%ct.request.functionname)
priority: WARNING
tags:
- cloud
- aws
- aws_lambda
source: aws_cloudtrail
- rule: Update Lambda Function Code
desc: Detect updates to a Lambda function code.
condition:
ct.name="UpdateFunctionCode20150331v2" and not ct.error exists
output:
The code of a Lambda function has been updated.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
lambda function=%ct.request.functionname)
priority: WARNING
tags:
- cloud
- aws
- aws_lambda
source: aws_cloudtrail
- rule: Update Lambda Function Configuration
desc: Detect updates to a Lambda function configuration.
condition:
ct.name="UpdateFunctionConfiguration20150331v2" and not ct.error exists
output:
The configuration of a Lambda function has been updated.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
lambda function=%ct.request.functionname)
priority: WARNING
tags:
- cloud
- aws
- aws_lambda
source: aws_cloudtrail
- rule: Run Instances
desc: Detect launching of a specified number of instances.
condition:
ct.name="RunInstances" and not ct.error exists
output:
A number of instances have been launched.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
availability zone=%ct.request.availabilityzone,
subnet id=%ct.response.subnetid,
reservation id=%ct.response.reservationid)
priority: WARNING
tags:
- cloud
- aws
- aws_ec2
source: aws_cloudtrail
# Only instances launched on regions in this list are approved.
- list: approved_regions
items:
- us-east-0
- rule: Run Instances in Non-approved Region
desc: Detect launching of a specified number of instances in a non-approved region.
condition:
ct.name="RunInstances" and not ct.error exists and
not ct.region in (approved_regions)
output:
A number of instances have been launched in a non-approved region.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
availability zone=%ct.request.availabilityzone,
subnet id=%ct.response.subnetid,
reservation id=%ct.response.reservationid,
image id=%json.value[/responseElements/instancesSet/items/0/instanceId])
priority: WARNING
tags:
- cloud
- aws
- aws_ec2
source: aws_cloudtrail
- rule: Delete Bucket Encryption
desc: Detect deleting configuration to use encryption for bucket storage.
condition:
ct.name="DeleteBucketEncryption" and not ct.error exists
output:
    An encryption configuration for a bucket has been deleted
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
bucket=%s3.bucket)
priority: CRITICAL
tags:
- cloud
- aws
- aws_s3
source: aws_cloudtrail
- rule: Delete Bucket Public Access Block
  desc: Detect disabling of the public access block for a bucket.
condition:
ct.name="PutBucketPublicAccessBlock" and not ct.error exists and
json.value[/requestParameters/publicAccessBlock]="" and
(json.value[/requestParameters/PublicAccessBlockConfiguration/RestrictPublicBuckets]=false or
json.value[/requestParameters/PublicAccessBlockConfiguration/BlockPublicPolicy]=false or
json.value[/requestParameters/PublicAccessBlockConfiguration/BlockPublicAcls]=false or
json.value[/requestParameters/PublicAccessBlockConfiguration/IgnorePublicAcls]=false)
output:
A public access block for a bucket has been deleted
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
bucket=%s3.bucket)
priority: CRITICAL
tags:
- cloud
- aws
- aws_s3
source: aws_cloudtrail
- rule: List Buckets
desc: Detect listing of all S3 buckets.
condition:
ct.name="ListBuckets" and not ct.error exists
output:
A list of all S3 buckets has been requested.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
host=%ct.request.host)
priority: WARNING
enabled: false
tags:
- cloud
- aws
- aws_s3
source: aws_cloudtrail
- rule: Put Bucket ACL
desc: Detect setting the permissions on an existing bucket using access control lists.
condition:
ct.name="PutBucketAcl" and not ct.error exists
output:
The permissions on an existing bucket have been set using access control lists.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
bucket name=%s3.bucket)
priority: WARNING
tags:
- cloud
- aws
- aws_s3
source: aws_cloudtrail
- rule: Put Bucket Policy
desc: Detect applying an Amazon S3 bucket policy to an Amazon S3 bucket.
condition:
ct.name="PutBucketPolicy" and not ct.error exists
output:
An Amazon S3 bucket policy has been applied to an Amazon S3 bucket.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
bucket name=%s3.bucket,
policy=%ct.request.policy)
priority: WARNING
tags:
- cloud
- aws
- aws_s3
source: aws_cloudtrail
- rule: CloudTrail Trail Created
desc: Detect creation of a new trail.
condition:
ct.name="CreateTrail" and not ct.error exists
output:
A new trail has been created.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
trail name=%ct.request.name)
priority: WARNING
tags:
- cloud
- aws
- aws_cloudtrail
source: aws_cloudtrail
- rule: CloudTrail Logging Disabled
  desc: CloudTrail logging has been disabled; this could be potentially malicious.
condition:
ct.name="StopLogging" and not ct.error exists
output:
The CloudTrail logging has been disabled.
(requesting user=%ct.user,
requesting IP=%ct.srcip,
AWS region=%ct.region,
resource name=%ct.request.name)
priority: WARNING
tags:
- cloud
- aws
- aws_cloudtrail
source: aws_cloudtrail

View File

@ -0,0 +1,30 @@
#
# Copyright (C) 2019 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####################
# Your custom rules!
####################
# Add new rules, like this one
# - rule: The program "sudo" is run in a container
# desc: An event will trigger every time you run sudo in a container
# condition: evt.type = execve and evt.dir=< and container.id != host and proc.name = sudo
# output: "Sudo run in container (user=%user.name %container.info parent=%proc.pname cmdline=%proc.cmdline)"
# priority: ERROR
# tags: [users, container]
# Or override/append to any rule, macro, or list from the Default Rules
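# For example, a sketch of appending to a list defined in the Default Rules
# (the item shown is illustrative):
# - list: user_known_sa_list
#   append: true
#   items: [my-trusted-serviceaccount]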

File diff suppressed because it is too large

View File

@ -0,0 +1,669 @@
#
# Copyright (C) 2019 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- required_engine_version: 2
# Like always_true/always_false, but works with k8s audit events
- macro: k8s_audit_always_true
condition: (jevt.rawtime exists)
- macro: k8s_audit_never_true
condition: (jevt.rawtime=0)
# Generally only consider audit events once the response has completed
- list: k8s_audit_stages
items: ["ResponseComplete"]
# Generally exclude users starting with "system:"
- macro: non_system_user
condition: (not ka.user.name startswith "system:")
# This macro selects the set of Audit Events used by the below rules.
- macro: kevt
condition: (jevt.value[/stage] in (k8s_audit_stages))
- macro: kevt_started
condition: (jevt.value[/stage]=ResponseStarted)
# If you wish to restrict activity to a specific set of users, override/append to this list.
# users created by kops are included
- list: vertical_pod_autoscaler_users
items: ["vpa-recommender", "vpa-updater"]
- list: allowed_k8s_users
items: [
"minikube", "minikube-user", "kubelet", "kops", "admin", "kube", "kube-proxy", "kube-apiserver-healthcheck",
"kubernetes-admin",
vertical_pod_autoscaler_users,
    "cluster-autoscaler",
"system:addon-manager",
"cloud-controller-manager",
"eks:node-manager",
"system:kube-controller-manager"
]
- rule: Disallowed K8s User
desc: Detect any k8s operation by users outside of an allowed set of users.
condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users)
output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
priority: WARNING
source: k8s_audit
tags: [k8s]
# In a local/user rules file, you could override this macro to
# explicitly enumerate the container images that you want to run in
# your environment. In this main falco rules file, there isn't any way
# to know all the containers that can run, so any container is
# allowed, by using the always_true macro. In the overridden macro, the condition
# would look something like (ka.req.pod.containers.image.repository in (my-repo/my-image))
- macro: allowed_k8s_containers
condition: (k8s_audit_always_true)
- macro: response_successful
condition: (ka.response.code startswith 2)
- macro: kcreate
condition: ka.verb=create
- macro: kmodify
condition: (ka.verb in (create,update,patch))
- macro: kdelete
condition: ka.verb=delete
- macro: pod
condition: ka.target.resource=pods and not ka.target.subresource exists
- macro: pod_subresource
condition: ka.target.resource=pods and ka.target.subresource exists
- macro: deployment
condition: ka.target.resource=deployments
- macro: service
condition: ka.target.resource=services
- macro: configmap
condition: ka.target.resource=configmaps
- macro: namespace
condition: ka.target.resource=namespaces
- macro: serviceaccount
condition: ka.target.resource=serviceaccounts
- macro: clusterrole
condition: ka.target.resource=clusterroles
- macro: clusterrolebinding
condition: ka.target.resource=clusterrolebindings
- macro: role
condition: ka.target.resource=roles
- macro: secret
condition: ka.target.resource=secrets
- macro: health_endpoint
condition: ka.uri=/healthz
- rule: Create Disallowed Pod
desc: >
Detect an attempt to start a pod with a container image outside of a list of allowed images.
condition: kevt and pod and kcreate and not allowed_k8s_containers
output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- rule: Create Privileged Pod
desc: >
Detect an attempt to start a pod with a privileged container
condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (falco_privileged_images)
output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- macro: sensitive_vol_mount
condition: >
(ka.req.pod.volumes.hostpath intersects (/proc, /var/run/docker.sock, /, /etc, /root, /var/run/crio/crio.sock, /home/admin, /var/lib/kubelet, /var/lib/kubelet/pki, /etc/kubernetes, /etc/kubernetes/manifests))
- rule: Create Sensitive Mount Pod
desc: >
Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc).
Exceptions are made for known trusted images.
condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images)
output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])
priority: WARNING
source: k8s_audit
tags: [k8s]
# These container images are allowed to run with hostnetwork=true
- list: falco_hostnetwork_images
items: [
gcr.io/google-containers/prometheus-to-sd,
gcr.io/projectcalico-org/typha,
gcr.io/projectcalico-org/node,
gke.gcr.io/gke-metadata-server,
gke.gcr.io/kube-proxy,
gke.gcr.io/netd-amd64,
    k8s.gcr.io/ip-masq-agent-amd64,
    k8s.gcr.io/prometheus-to-sd,
]
# Corresponds to K8s CIS Benchmark 1.7.4
- rule: Create HostNetwork Pod
desc: Detect an attempt to start a pod using the host network.
condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images)
output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- macro: user_known_node_port_service
condition: (k8s_audit_never_true)
- rule: Create NodePort Service
desc: >
Detect an attempt to start a service with a NodePort service type
condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service
output: NodePort Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace ports=%ka.req.service.ports)
priority: WARNING
source: k8s_audit
tags: [k8s]
- macro: contains_private_credentials
condition: >
(ka.req.configmap.obj contains "aws_access_key_id" or
ka.req.configmap.obj contains "aws-access-key-id" or
ka.req.configmap.obj contains "aws_s3_access_key_id" or
ka.req.configmap.obj contains "aws-s3-access-key-id" or
ka.req.configmap.obj contains "password" or
ka.req.configmap.obj contains "passphrase")
- rule: Create/Modify Configmap With Private Credentials
desc: >
Detect creating/modifying a configmap containing a private credential (aws key, password, etc.)
condition: kevt and configmap and kmodify and contains_private_credentials
output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb configmap=%ka.req.configmap.name config=%ka.req.configmap.obj)
priority: WARNING
source: k8s_audit
tags: [k8s]
# Corresponds to K8s CIS Benchmark, 1.1.1.
- rule: Anonymous Request Allowed
desc: >
Detect any request made by the anonymous user that was allowed
condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint
  output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)
priority: WARNING
source: k8s_audit
tags: [k8s]
# Roughly corresponds to K8s CIS Benchmark, 1.1.12. In this case,
# notifies an attempt to exec/attach to a privileged container.
# Ideally, we'd add a more stringent rule that detects attaches/execs
# to a privileged pod, but that requires the engine for k8s audit
# events to be stateful, so it could know if a container named in an
# attach request was created privileged or not. For now, we have a
# less severe rule that detects attaches/execs to any pod.
#
# For the same reason, you can't use things like image names/prefixes,
# as the event that creates the pod (which has the images) is a
# separate event than the actual exec/attach to the pod.
- macro: user_known_exec_pod_activities
condition: (k8s_audit_never_true)
- rule: Attach/Exec Pod
desc: >
Detect any attempt to attach/exec to a pod
condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities
output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command])
priority: NOTICE
source: k8s_audit
tags: [k8s]
- macro: user_known_pod_debug_activities
condition: (k8s_audit_never_true)
# Only works when feature gate EphemeralContainers is enabled
- rule: EphemeralContainers Created
desc: >
Detect any ephemeral container created
condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities
output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image])
priority: NOTICE
source: k8s_audit
tags: [k8s]
# In a local/user rules file, you can append to this list to add additional allowed namespaces
- list: allowed_namespaces
items: [kube-system, kube-public, default]
- rule: Create Disallowed Namespace
desc: Detect any attempt to create a namespace outside of a set of known namespaces
condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces)
output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name)
priority: WARNING
source: k8s_audit
tags: [k8s]
# Only defined for backwards compatibility. Use the more specific
# user_allowed_kube_namespace_image_list instead.
- list: user_trusted_image_list
items: []
- list: user_allowed_kube_namespace_image_list
items: [user_trusted_image_list]
# Only defined for backwards compatibility. Use the more specific
# allowed_kube_namespace_image_list instead.
- list: k8s_image_list
items: []
- list: allowed_kube_namespace_image_list
items: [
gcr.io/google-containers/prometheus-to-sd,
gcr.io/projectcalico-org/node,
gke.gcr.io/addon-resizer,
gke.gcr.io/heapster,
gke.gcr.io/gke-metadata-server,
k8s.gcr.io/ip-masq-agent-amd64,
k8s.gcr.io/kube-apiserver,
gke.gcr.io/kube-proxy,
gke.gcr.io/netd-amd64,
k8s.gcr.io/addon-resizer,
k8s.gcr.io/prometheus-to-sd,
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64,
k8s.gcr.io/k8s-dns-kube-dns-amd64,
k8s.gcr.io/k8s-dns-sidecar-amd64,
k8s.gcr.io/metrics-server-amd64,
kope/kube-apiserver-healthcheck,
k8s_image_list
]
- macro: allowed_kube_namespace_pods
condition: (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or
ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list))
# Detect any new pod created in the kube-system namespace
- rule: Pod Created in Kube Namespace
desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces
condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods
output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- list: user_known_sa_list
items: []
- list: known_sa_list
items: [
coredns,
coredns-autoscaler,
cronjob-controller,
daemon-set-controller,
deployment-controller,
disruption-controller,
endpoint-controller,
endpointslice-controller,
endpointslicemirroring-controller,
generic-garbage-collector,
horizontal-pod-autoscaler,
job-controller,
namespace-controller,
node-controller,
persistent-volume-binder,
pod-garbage-collector,
pv-protection-controller,
pvc-protection-controller,
replicaset-controller,
resourcequota-controller,
root-ca-cert-publisher,
service-account-controller,
statefulset-controller
]
- macro: trusted_sa
condition: (ka.target.name in (known_sa_list, user_known_sa_list))
# Detect creating a service account in the kube-system/kube-public namespace
- rule: Service Account Created in Kube Namespace
desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces
condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa
output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace)
priority: WARNING
source: k8s_audit
tags: [k8s]
# Detect any modify/delete to any ClusterRole starting with
# "system:". "system:coredns" is excluded as changes are expected in
# normal operation.
- rule: System ClusterRole Modified/Deleted
desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system
condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and
not ka.target.name in (system:coredns, system:managed-certificate-controller)
output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name ns=%ka.target.namespace action=%ka.verb)
priority: WARNING
source: k8s_audit
tags: [k8s]
# Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
# (expand this to any built-in cluster role that does "sensitive" things)
- rule: Attach to cluster-admin Role
desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin
output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects)
priority: WARNING
source: k8s_audit
tags: [k8s]
- rule: ClusterRole With Wildcard Created
desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs
condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*"))
output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules)
priority: WARNING
source: k8s_audit
tags: [k8s]
- macro: writable_verbs
condition: >
(ka.req.role.rules.verbs intersects (create, update, patch, delete, deletecollection))
- rule: ClusterRole With Write Privileges Created
desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions
condition: kevt and (role or clusterrole) and kcreate and writable_verbs
output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules)
priority: NOTICE
source: k8s_audit
tags: [k8s]
- rule: ClusterRole With Pod Exec Created
desc: Detect any attempt to create a Role/ClusterRole that can exec to pods
condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec")
output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules)
priority: WARNING
source: k8s_audit
tags: [k8s]
# The rules below this point are less discriminatory and generally
# represent a stream of activity for a cluster. If you wish to disable
# these events, modify the following macro.
- macro: consider_activity_events
condition: (k8s_audit_always_true)
- macro: kactivity
condition: (kevt and consider_activity_events)
- rule: K8s Deployment Created
desc: Detect any attempt to create a deployment
condition: (kactivity and kcreate and deployment and response_successful)
output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Deployment Deleted
desc: Detect any attempt to delete a deployment
condition: (kactivity and kdelete and deployment and response_successful)
output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Service Created
desc: Detect any attempt to create a service
condition: (kactivity and kcreate and service and response_successful)
output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Service Deleted
desc: Detect any attempt to delete a service
condition: (kactivity and kdelete and service and response_successful)
output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s ConfigMap Created
desc: Detect any attempt to create a configmap
condition: (kactivity and kcreate and configmap and response_successful)
output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s ConfigMap Deleted
desc: Detect any attempt to delete a configmap
condition: (kactivity and kdelete and configmap and response_successful)
output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Namespace Created
desc: Detect any attempt to create a namespace
condition: (kactivity and kcreate and namespace and response_successful)
output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Namespace Deleted
desc: Detect any attempt to delete a namespace
condition: (kactivity and non_system_user and kdelete and namespace and response_successful)
output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Serviceaccount Created
desc: Detect any attempt to create a service account
condition: (kactivity and kcreate and serviceaccount and response_successful)
  output: K8s Serviceaccount Created (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Serviceaccount Deleted
desc: Detect any attempt to delete a service account
condition: (kactivity and kdelete and serviceaccount and response_successful)
  output: K8s Serviceaccount Deleted (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Role/Clusterrole Created
desc: Detect any attempt to create a cluster role/role
condition: (kactivity and kcreate and (clusterrole or role) and response_successful)
output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Role/Clusterrole Deleted
desc: Detect any attempt to delete a cluster role/role
condition: (kactivity and kdelete and (clusterrole or role) and response_successful)
output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Role/Clusterrolebinding Created
desc: Detect any attempt to create a clusterrolebinding
condition: (kactivity and kcreate and clusterrolebinding and response_successful)
output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Role/Clusterrolebinding Deleted
desc: Detect any attempt to delete a clusterrolebinding
condition: (kactivity and kdelete and clusterrolebinding and response_successful)
output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Secret Created
desc: Detect any attempt to create a secret. Service account tokens are excluded.
condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
- rule: K8s Secret Deleted
  desc: Detect any attempt to delete a secret. Service account tokens are excluded.
condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO
source: k8s_audit
tags: [k8s]
# This rule generally matches all events, and as a result is disabled
# by default. If you wish to enable these events, modify the
# following macro.
# condition: (jevt.rawtime exists)
- macro: consider_all_events
condition: (k8s_audit_never_true)
- macro: kall
condition: (kevt and consider_all_events)
- rule: All K8s Audit Events
desc: Match all K8s Audit Events
condition: kall
output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj)
priority: DEBUG
source: k8s_audit
tags: [k8s]
# This macro disables the following rule; change it to k8s_audit_never_true to enable it
- macro: allowed_full_admin_users
condition: (k8s_audit_always_true)
# This list includes some of the default user names for an administrator in several K8s installations
- list: full_admin_k8s_users
items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"]
# This rule detects an operation triggered by a user name that is
# included in the list of those that are default administrators upon
# cluster creation. This may signify a permission setting that is too broad.
# As we can't check for role of the user on a general ka.* event, this
# may or may not be an administrator. Customize the full_admin_k8s_users
# list to your needs, and activate at your discretion.
# # How to test:
# # Execute any kubectl command while connected as the default cluster user, e.g.:
# kubectl create namespace rule-test
- rule: Full K8s Administrative Access
desc: Detect any k8s operation by a user name that may be an administrator with full access.
condition: >
kevt
and non_system_user
and ka.user.name in (full_admin_k8s_users)
and not allowed_full_admin_users
output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
priority: WARNING
source: k8s_audit
tags: [k8s]
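# For example, to activate this rule you could override the macro above and
# extend the admin list in a local rules file (a sketch; the extra user name
# is hypothetical):
# - macro: allowed_full_admin_users
#   condition: (k8s_audit_never_true)
# - list: full_admin_k8s_users
#   items: ["admin", "kubernetes-admin", "ops-admin@example.com"]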
- macro: ingress
condition: ka.target.resource=ingresses
- macro: ingress_tls
condition: (jevt.value[/requestObject/spec/tls] exists)
# # How to test:
# # Create an ingress.yaml file with content:
# apiVersion: networking.k8s.io/v1beta1
# kind: Ingress
# metadata:
# name: test-ingress
# annotations:
# nginx.ingress.kubernetes.io/rewrite-target: /
# spec:
# rules:
# - http:
# paths:
# - path: /testpath
# backend:
# serviceName: test
# servicePort: 80
# # Execute: kubectl apply -f ingress.yaml
- rule: Ingress Object without TLS Certificate Created
  desc: Detect any attempt to create an ingress without a TLS certificate.
condition: >
(kactivity and kcreate and ingress and response_successful and not ingress_tls)
output: >
K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name
namespace=%ka.target.namespace)
source: k8s_audit
priority: WARNING
tags: [k8s, network]
- macro: node
condition: ka.target.resource=nodes
- macro: allow_all_k8s_nodes
condition: (k8s_audit_always_true)
- list: allowed_k8s_nodes
items: []
# # How to test:
# # Create a Falco monitored cluster with Kops
# # Increase the number of minimum nodes with:
# kops edit ig nodes
# kops apply --yes
- rule: Untrusted Node Successfully Joined the Cluster
desc: >
    Detect a node outside the list of allowed nodes successfully joining the cluster.
condition: >
kevt and node
and kcreate
and response_successful
and not allow_all_k8s_nodes
and not ka.target.name in (allowed_k8s_nodes)
output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name)
priority: ERROR
source: k8s_audit
tags: [k8s]
- rule: Untrusted Node Unsuccessfully Tried to Join the Cluster
desc: >
Detect an unsuccessful attempt to join the cluster for a node not in the list of allowed nodes.
condition: >
kevt and node
and kcreate
and not response_successful
and not allow_all_k8s_nodes
and not ka.target.name in (allowed_k8s_nodes)
output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason)
priority: WARNING
source: k8s_audit
tags: [k8s]
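# For example, to make the two node rules above effective you could override
# the following in a local rules file (a sketch; the node names are
# hypothetical):
# - macro: allow_all_k8s_nodes
#   condition: (k8s_audit_never_true)
# - list: allowed_k8s_nodes
#   items: ["node-1.example.com", "node-2.example.com"]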


@ -0,0 +1,24 @@
Falco agents are spinning up on each node in your cluster. After a few
seconds, they will start monitoring your containers for security issues.
{{printf "\n" }}
{{- if .Values.integrations }}
WARNING: The following integrations have been deprecated and removed
- gcscc
- natsOutput
- snsOutput
- pubsubOutput
Consider using falcosidekick (https://github.com/falcosecurity/falcosidekick) as a replacement.
{{- else }}
No further action should be required.
{{- end }}
{{printf "\n" }}
{{- if not .Values.falcosidekick.enabled }}
Tip:
You can easily forward Falco events to Slack, Kafka, AWS Lambda and more with falcosidekick.
Full list of outputs: https://github.com/falcosecurity/charts/tree/master/falcosidekick.
You can enable its deployment with `--set falcosidekick.enabled=true` or in your values.yaml.
See: https://github.com/falcosecurity/charts/blob/master/falcosidekick/values.yaml for configuration values.
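For example (the release name and chart reference are illustrative):
  helm upgrade --install falco falcosecurity/falco --set falcosidekick.enabled=true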
{{- end}}


@ -0,0 +1,86 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "falco.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "falco.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "falco.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "falco.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "falco.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the proper Falco image name
*/}}
{{- define "falco.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
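{{/*
For example (a sketch based on this chart's defaults): with image.registry=docker.io,
image.repository=falcosecurity/falco and image.tag=0.31.1, "falco.image" renders
"docker.io/falcosecurity/falco:0.31.1"; a global.imageRegistry value set by a parent
chart takes precedence over image.registry.
*/}}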
{{/*
Extract the unixSocket's directory path
*/}}
{{- define "falco.unixSocketDir" -}}
{{- if .Values.falco.grpc.unixSocketPath -}}
{{- .Values.falco.grpc.unixSocketPath | trimPrefix "unix://" | dir -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for rbac.
*/}}
{{- define "rbac.apiVersion" -}}
{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- end -}}
{{- end -}}


@ -0,0 +1,30 @@
{{- if (and .Values.auditLog.enabled .Values.auditLog.dynamicBackend.enabled) }}
apiVersion: auditregistration.k8s.io/v1alpha1
kind: AuditSink
metadata:
name: {{ template "falco.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
policy:
level: RequestResponse
stages:
- ResponseComplete
- ResponseStarted
webhook:
throttle:
qps: 10
burst: 15
clientConfig:
{{- if .Values.auditLog.dynamicBackend.url }}
url: {{ .Values.auditLog.dynamicBackend.url }}
{{- else }}
service:
namespace: {{ .Release.Namespace }}
name: {{ template "falco.fullname" . }}
port: {{ .Values.falco.webserver.listenPort }}
path: {{ .Values.falco.webserver.k8sAuditEndpoint }}
{{- end }}
{{- if .Values.falco.webserver.sslEnabled }}
caBundle: {{ .Values.certs.ca.crt | b64enc | quote }}
{{- end}}
{{- end }}


@ -0,0 +1,59 @@
{{- if .Values.rbac.create }}
kind: ClusterRole
apiVersion: {{ template "rbac.apiVersion" . }}
metadata:
name: {{ template "falco.fullname" .}}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
rules:
- apiGroups:
- extensions
- ""
resources:
- nodes
- namespaces
- pods
- replicationcontrollers
- replicasets
- services
- daemonsets
- deployments
- events
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- nonResourceURLs:
- /healthz
- /healthz/*
verbs:
- get
{{- if .Values.podSecurityPolicy.create }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "falco.fullname" . }}
{{- if .Values.fakeEventGenerator.enabled }}
- event-generator-{{ template "falco.fullname" . }}
{{- end }}
verbs:
- use
{{- end }}
{{- end }}


@ -0,0 +1,19 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: {{ template "rbac.apiVersion" . }}
metadata:
name: {{ template "falco.fullname" .}}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
subjects:
- kind: ServiceAccount
name: {{ template "falco.serviceAccountName" .}}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ template "falco.fullname" .}}
apiGroup: rbac.authorization.k8s.io
{{- end }}


@ -0,0 +1,17 @@
{{- if .Values.customRules }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "falco.fullname" . }}-rules
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
data:
{{- range $file, $content := .Values.customRules }}
{{ $file }}: |-
{{ $content | indent 4}}
{{- end }}
{{- end }}


@ -0,0 +1,270 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "falco.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
data:
falco.yaml: |-
# File(s) or Directories containing Falco rules, loaded at startup.
# The name "rules_file" is only for backwards compatibility.
# If the entry is a file, it will be read directly. If the entry is a directory,
# every file in that directory will be read, in alphabetical order.
#
# falco_rules.yaml ships with the falco package and is overridden with
# every new software version. falco_rules.local.yaml is only created
# if it doesn't exist. If you want to customize the set of rules, add
# your customizations to falco_rules.local.yaml.
#
# The files will be read in the order presented here, so make sure if
# you have overrides they appear in later files.
rules_file:
{{- range .Values.falco.rulesFile }}
- {{ . }}
{{- end }}
plugins:
{{ toYaml .Values.falco.plugins | indent 8 }}
# Setting this list to empty ensures that the above plugins are *not*
# loaded and enabled by default. If you want to use the above plugins,
# set a meaningful init_config/open_params for the cloudtrail plugin
# and then change this to:
# load_plugins: [cloudtrail, json]
load_plugins:
{{ toYaml .Values.falco.loadPlugins | indent 8 }}
# If true, the times displayed in log messages and output messages
# will be in ISO 8601. By default, times are displayed in the local
# time zone, as governed by /etc/localtime.
time_format_iso_8601: {{ .Values.falco.timeFormatISO8601 }}
# Whether to output events in json or text
{{- if .Values.falcosidekick.enabled }}
json_output: true
{{- else }}
json_output: {{ .Values.falco.jsonOutput }}
{{- end }}
# When using json output, whether or not to include the "output" property
# itself (e.g. "File below a known binary directory opened for writing
# (user=root ....") in the json output.
{{- if .Values.falcosidekick.enabled }}
json_include_output_property: true
{{- else }}
json_include_output_property: {{ .Values.falco.jsonIncludeOutputProperty }}
{{- end }}
# When using json output, whether or not to include the "tags" property
# itself in the json output. If set to true, outputs caused by rules
# with no tags will have a "tags" field set to an empty array. If set to
# false, the "tags" field will not be included in the json output at all.
json_include_tags_property: {{ .Values.falco.jsonIncludeTagsProperty }}
    # Send information logs to stderr and/or syslog. Note these are *not* security
# notification logs! These are just Falco lifecycle (and possibly error) logs.
log_stderr: {{ .Values.falco.logStderr }}
log_syslog: {{ .Values.falco.logSyslog }}
# Minimum log level to include in logs. Note: these levels are
# separate from the priority field of rules. This refers only to the
# log level of falco's internal logging. Can be one of "emergency",
# "alert", "critical", "error", "warning", "notice", "info", "debug".
log_level: {{ .Values.falco.logLevel }}
# Minimum rule priority level to load and run. All rules having a
# priority more severe than this level will be loaded/run. Can be one
# of "emergency", "alert", "critical", "error", "warning", "notice",
# "info", "debug".
priority: {{ .Values.falco.priority }}
# Whether or not output to any of the output channels below is
# buffered. Defaults to false
buffered_outputs: {{ .Values.falco.bufferedOutputs }}
# Falco uses a shared buffer between the kernel and userspace to pass
# system call information. When Falco detects that this buffer is
# full and system calls have been dropped, it can take one or more of
# the following actions:
# - ignore: do nothing (default when list of actions is empty)
# - log: log a DEBUG message noting that the buffer was full
# - alert: emit a Falco alert noting that the buffer was full
# - exit: exit Falco with a non-zero rc
#
# Notice it is not possible to ignore and log/alert messages at the same time.
#
# The rate at which log/alert messages are emitted is governed by a
# token bucket. The rate corresponds to one message every 30 seconds
# with a burst of one message (by default).
#
# The messages are emitted when the percentage of dropped system calls
# with respect the number of events in the last second
# is greater than the given threshold (a double in the range [0, 1]).
#
# For debugging/testing it is possible to simulate the drops using
# the `simulate_drops: true`. In this case the threshold does not apply.
syscall_event_drops:
threshold: {{ .Values.falco.syscallEventDrops.threshold }}
actions:
{{- range .Values.falco.syscallEventDrops.actions }}
- {{ . }}
{{- end }}
rate: {{ .Values.falco.syscallEventDrops.rate }}
max_burst: {{ .Values.falco.syscallEventDrops.maxBurst }}
# Falco uses a shared buffer between the kernel and userspace to receive
# the events (eg., system call information) in userspace.
#
    # However, the underlying libraries can also time out for various reasons.
# For example, there could have been issues while reading an event.
# Or the particular event needs to be skipped.
# Normally, it's very unlikely that Falco does not receive events consecutively.
#
    # Falco is able to detect such an uncommon situation.
#
# Here you can configure the maximum number of consecutive timeouts without an event
# after which you want Falco to alert.
# By default this value is set to 1000 consecutive timeouts without an event at all.
# How this value maps to a time interval depends on the CPU frequency.
syscall_event_timeouts:
max_consecutives: {{ .Values.falco.syscallEventTimeouts.maxConsecutives }}
    # Falco continuously monitors output performance. When an output channel does not
    # allow an alert to be delivered within a given deadline, an error is reported indicating
# which output is blocking notifications.
# The timeout error will be reported to the log according to the above log_* settings.
# Note that the notification will not be discarded from the output queue; thus,
# output channels may indefinitely remain blocked.
    # An output timeout error indeed indicates a misconfiguration issue or I/O problems
# that cannot be recovered by Falco and should be fixed by the user.
#
# The "output_timeout" value specifies the duration in milliseconds to wait before
    # considering the deadline exceeded.
#
# With a 2000ms default, the notification consumer can block the Falco output
# for up to 2 seconds without reaching the timeout.
output_timeout: {{ .Values.falco.output_timeout }}
# A throttling mechanism implemented as a token bucket limits the
# rate of falco notifications. This throttling is controlled by the following configuration
# options:
# - rate: the number of tokens (i.e. right to send a notification)
# gained per second. Defaults to 1.
# - max_burst: the maximum number of tokens outstanding. Defaults to 1000.
#
# With these defaults, falco could send up to 1000 notifications after
# an initial quiet period, and then up to 1 notification per second
# afterward. It would gain the full burst back after 1000 seconds of
# no activity.
outputs:
rate: {{ .Values.falco.outputs.rate }}
max_burst: {{ .Values.falco.outputs.maxBurst }}
# Where security notifications should go.
# Multiple outputs can be enabled.
syslog_output:
enabled: {{ .Values.falco.syslogOutput.enabled }}
# If keep_alive is set to true, the file will be opened once and
# continuously written to, with each output message on its own
# line. If keep_alive is set to false, the file will be re-opened
# for each output message.
#
# Also, the file will be closed and reopened if falco is signaled with
# SIGUSR1.
file_output:
enabled: {{ .Values.falco.fileOutput.enabled }}
keep_alive: {{ .Values.falco.fileOutput.keepAlive }}
filename: {{ .Values.falco.fileOutput.filename }}
stdout_output:
enabled: {{ .Values.falco.stdoutOutput.enabled }}
# Falco contains an embedded webserver that can be used to accept K8s
# Audit Events. These config options control the behavior of that
    # webserver. (By default, the webserver is enabled).
#
# The ssl_certificate is a combination SSL Certificate and corresponding
# key contained in a single file. You can generate a key/cert as follows:
#
# $ openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem
# $ cat certificate.pem key.pem > falco.pem
# $ sudo cp falco.pem /etc/falco/falco.pem
webserver:
enabled: {{ .Values.falco.webserver.enabled }}
listen_port: {{ .Values.falco.webserver.listenPort }}
k8s_audit_endpoint: {{ .Values.falco.webserver.k8sAuditEndpoint }}
k8s_healthz_endpoint: {{ .Values.falco.webserver.k8sHealthzEndpoint }}
ssl_enabled: {{ .Values.falco.webserver.sslEnabled }}
ssl_certificate: {{ .Values.falco.webserver.sslCertificate }}
# If keep_alive is set to true, the program will be started once and
# continuously written to, with each output message on its own
# line. If keep_alive is set to false, the program will be re-spawned
# for each output message.
#
# Also, the program will be closed and reopened if falco is signaled with
# SIGUSR1.
program_output:
enabled: {{ .Values.falco.programOutput.enabled }}
keep_alive: {{ .Values.falco.programOutput.keepAlive }}
program: |
{{ .Values.falco.programOutput.program | indent 8 }}
http_output:
enabled: {{ if .Values.falcosidekick.enabled }}true{{ else }}{{ .Values.falco.httpOutput.enabled }}{{ end }}
url: '{{ if .Values.falco.httpOutput.url }}{{ .Values.falco.httpOutput.url }}{{ else }}http://{{ template "falco.fullname" . }}-falcosidekick{{ if .Values.falcosidekick.fullfqdn }}.{{ .Release.Namespace }}.svc.cluster.local{{ end }}:{{ .Values.falcosidekick.listenport | default "2801" }}{{ end }}'
user_agent: {{ .Values.falco.httpOutput.userAgent }}
# Falco supports running a gRPC server with two main binding types
# 1. Over the network with mandatory mutual TLS authentication (mTLS)
# 2. Over a local unix socket with no authentication
# By default, the gRPC server is disabled, with no enabled services (see grpc_output)
    # please comment/uncomment and change the options below accordingly to configure it.
    # Important note: if Falco has any trouble creating the gRPC server,
    # this information will be logged, but the main Falco daemon will not be stopped.
# gRPC server over network with (mandatory) mutual TLS configuration.
# This gRPC server is secure by default so you need to generate certificates and update their paths here.
# By default the gRPC server is off.
# You can configure the address to bind and expose it.
# By modifying the threadiness configuration you can fine-tune the number of threads (and context) it will use.
# grpc:
# enabled: true
# bind_address: "0.0.0.0:5060"
# # when threadiness is 0, Falco sets it by automatically figuring out the number of online cores
# threadiness: 0
# private_key: "/etc/falco/certs/server.key"
# cert_chain: "/etc/falco/certs/server.crt"
# root_certs: "/etc/falco/certs/ca.crt"
grpc:
enabled: {{ .Values.falco.grpc.enabled }}
threadiness: {{ .Values.falco.grpc.threadiness }}
{{- if .Values.falco.grpc.unixSocketPath }}
bind_address: "{{ .Values.falco.grpc.unixSocketPath }}"
{{ else }}
bind_address: "0.0.0.0:{{ .Values.falco.grpc.listenPort }}"
private_key: {{ .Values.falco.grpc.privateKey }}
cert_chain: {{ .Values.falco.grpc.certChain }}
root_certs: {{ .Values.falco.grpc.rootCerts }}
{{- end }}
# gRPC output service.
# By default it is off.
# By enabling this all the output events will be kept in memory until you read them with a gRPC client.
# Make sure to have a consumer for them or leave this disabled.
grpc_output:
enabled: {{ .Values.falco.grpcOutput.enabled }}
# Container orchestrator metadata fetching params
metadata_download:
max_mb: {{ .Values.falco.metadataDownload.maxMb }}
chunk_wait_us: {{ .Values.falco.metadataDownload.chunkWaitUs }}
watch_freq_sec: {{ .Values.falco.metadataDownload.watchFreqSec }}
{{ (.Files.Glob "rules/*").AsConfig | indent 2 }}


@ -0,0 +1,273 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ template "falco.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
selector:
matchLabels:
app: {{ template "falco.fullname" .}}
role: security
template:
metadata:
name: {{ template "falco.fullname" .}}
labels:
app: {{ template "falco.fullname" .}}
role: security
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/rules: {{ include (print $.Template.BasePath "/configmap-rules.yaml") . | sha256sum }}
{{- if and .Values.certs (not .Values.certs.existingSecret) }}
checksum/certs: {{ include (print $.Template.BasePath "/secret-certs.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.daemonset.podAnnotations }}
{{ toYaml .Values.daemonset.podAnnotations | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "falco.serviceAccountName" .}}
{{- if (and .Values.ebpf.enabled .Values.ebpf.settings.hostNetwork) }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets: {{ toYaml .Values.image.pullSecrets | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: {{ template "falco.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
{{ toYaml .Values.resources | indent 12 }}
securityContext:
privileged: {{ not .Values.leastPrivileged.enabled }}
{{- if .Values.leastPrivileged.enabled }}
capabilities:
add:
- BPF
- SYS_RESOURCE
- PERFMON
{{- end }}
args:
- /usr/bin/falco
{{- if and .Values.containerd .Values.containerd.enabled }}
- --cri
- /run/containerd/containerd.sock
{{- end }}
{{- if and .Values.crio .Values.crio.enabled }}
- --cri
- /run/crio/crio.sock
{{- end }}
{{- if .Values.kubernetesSupport.enabled }}
- -K
- {{ .Values.kubernetesSupport.apiAuth }}
- -k
- {{ .Values.kubernetesSupport.apiUrl }}
{{- if .Values.kubernetesSupport.enableNodeFilter }}
- --k8s-node
- "$(FALCO_K8S_NODE_NAME)"
{{- end }}
{{- end }}
- -pk
{{- if .Values.extraArgs }}
{{ toYaml .Values.extraArgs | indent 12 }}
{{- end }}
env:
- name: FALCO_K8S_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.ebpf.enabled }}
- name: FALCO_BPF_PROBE
value: {{ .Values.ebpf.path }}
{{- end }}
{{- if .Values.proxy.httpProxy }}
- name: http_proxy
value: {{ .Values.proxy.httpProxy }}
{{- end }}
{{- if .Values.proxy.httpsProxy }}
- name: https_proxy
value: {{ .Values.proxy.httpsProxy }}
{{- end }}
{{- if .Values.proxy.noProxy }}
- name: no_proxy
value: {{ .Values.proxy.noProxy }}
{{- end }}
{{- if .Values.timezone }}
- name: TZ
value: {{ .Values.timezone }}
{{- end }}
{{- range $key, $value := .Values.daemonset.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- if .Values.falco.webserver.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.falco.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.falco.livenessProbe.timeoutSeconds }}
periodSeconds: {{ .Values.falco.livenessProbe.periodSeconds }}
httpGet:
path: {{ .Values.falco.webserver.k8sHealthzEndpoint }}
port: {{ .Values.falco.webserver.listenPort }}
{{- if .Values.falco.webserver.sslEnabled }}
scheme: HTTPS
{{- end }}
readinessProbe:
initialDelaySeconds: {{ .Values.falco.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.falco.readinessProbe.timeoutSeconds }}
periodSeconds: {{ .Values.falco.readinessProbe.periodSeconds }}
httpGet:
path: {{ .Values.falco.webserver.k8sHealthzEndpoint }}
port: {{ .Values.falco.webserver.listenPort }}
{{- if .Values.falco.webserver.sslEnabled }}
scheme: HTTPS
{{- end }}
{{- end }}
volumeMounts:
{{- if .Values.docker.enabled }}
- mountPath: /host/var/run/docker.sock
name: docker-socket
{{- end}}
{{- if .Values.containerd.enabled }}
- mountPath: /host/run/containerd/containerd.sock
name: containerd-socket
{{- end}}
{{- if and .Values.crio .Values.crio.enabled }}
- mountPath: /host/run/crio/crio.sock
name: crio-socket
{{- end}}
- mountPath: /host/dev
name: dev-fs
readOnly: true
- mountPath: /host/proc
name: proc-fs
readOnly: true
- mountPath: /host/boot
name: boot-fs
readOnly: true
- mountPath: /host/lib/modules
name: lib-modules
- mountPath: /host/usr
name: usr-fs
readOnly: true
- mountPath: /host/etc
name: etc-fs
readOnly: true
- mountPath: /etc/falco
name: config-volume
{{- if .Values.customRules }}
- mountPath: /etc/falco/rules.d
name: rules-volume
{{- end }}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.unixSocketPath }}
- mountPath: {{ include "falco.unixSocketDir" . }}
name: grpc-socket-dir
{{- end }}
{{- if or .Values.falco.webserver.sslEnabled (and .Values.falco.grpc.enabled (not .Values.falco.grpc.unixSocketPath)) }}
- mountPath: /etc/falco/certs
name: certs-volume
readOnly: true
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | indent 12 }}
{{- end }}
{{- if .Values.extraInitContainers }}
initContainers:
{{ toYaml .Values.extraInitContainers | indent 12 }}
{{- end }}
volumes:
{{- if .Values.docker.enabled }}
- name: docker-socket
hostPath:
path: {{ .Values.docker.socket }}
{{- end}}
{{- if .Values.containerd.enabled }}
- name: containerd-socket
hostPath:
path: {{ .Values.containerd.socket }}
{{- end}}
{{- if and .Values.crio .Values.crio.enabled }}
- name: crio-socket
hostPath:
path: {{ .Values.crio.socket }}
{{- end}}
- name: dev-fs
hostPath:
path: /dev
- name: proc-fs
hostPath:
path: /proc
- name: boot-fs
hostPath:
path: /boot
- name: lib-modules
hostPath:
path: /lib/modules
- name: usr-fs
hostPath:
path: /usr
- name: etc-fs
hostPath:
path: /etc
- name: config-volume
configMap:
name: {{ template "falco.fullname" . }}
items:
- key: falco.yaml
path: falco.yaml
- key: falco_rules.yaml
path: falco_rules.yaml
- key: falco_rules.local.yaml
path: falco_rules.local.yaml
- key: application_rules.yaml
path: rules.available/application_rules.yaml
{{- if .Values.auditLog.enabled }}
- key: k8s_audit_rules.yaml
path: k8s_audit_rules.yaml
{{- end }}
- key: aws_cloudtrail_rules.yaml
path: aws_cloudtrail_rules.yaml
{{- if .Values.customRules }}
- name: rules-volume
configMap:
name: {{ template "falco.fullname" . }}-rules
{{- end }}
{{- if and .Values.falco.grpc.enabled .Values.falco.grpc.unixSocketPath }}
- name: grpc-socket-dir
hostPath:
path: {{ include "falco.unixSocketDir" . }}
{{- end }}
{{- if or .Values.falco.webserver.sslEnabled (and .Values.falco.grpc.enabled (not .Values.falco.grpc.unixSocketPath)) }}
- name: certs-volume
secret:
{{- if .Values.certs.existingSecret }}
secretName: {{ .Values.certs.existingSecret }}
{{- else }}
secretName: {{ template "falco.fullname" . }}-certs
{{- end }}
{{- end }}
{{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 8 }}
{{- end }}
updateStrategy:
{{ toYaml .Values.daemonset.updateStrategy | indent 4 }}


@ -0,0 +1,35 @@
{{- if .Values.fakeEventGenerator.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "falco.fullname" . }}-event-generator
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}-event-generator
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
replicas: {{ .Values.fakeEventGenerator.replicas }}
selector:
matchLabels:
app: {{ template "falco.fullname" . }}-event-generator
template:
metadata:
labels:
app: {{ template "falco.fullname" . }}-event-generator
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "falco.serviceAccountName" .}}
containers:
- name: {{ template "falco.fullname" . }}-event-generator
securityContext:
privileged: false
image: falcosecurity/event-generator:latest
{{- with .Values.fakeEventGenerator.args }}
args:
{{ toYaml . | indent 10 }}
{{- end }}
{{- end }}


@ -0,0 +1,52 @@
{{- if .Values.podSecurityPolicy.create}}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "falco.fullname" . }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
privileged: true
hostNetwork: true
allowedCapabilities: ['*']
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes: ['*']
{{- end }}
{{- if (and .Values.podSecurityPolicy.create .Values.fakeEventGenerator.enabled) }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: event-generator-{{ template "falco.fullname" . }}
labels:
app: {{ template "falco.fullname" . }}-event-generator
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
privileged: false
hostNetwork: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes: []
{{- end }}


@ -0,0 +1,21 @@
{{- if and (not .Values.certs.existingSecret) (or .Values.falco.webserver.sslEnabled (and .Values.falco.grpc.enabled (not .Values.falco.grpc.unixSocketPath))) }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ template "falco.fullname" . }}-certs
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
type: Opaque
data:
{{ $key := .Values.certs.server.key }}
server.key: {{ $key | b64enc | quote }}
{{ $crt := .Values.certs.server.crt }}
server.crt: {{ $crt | b64enc | quote }}
server.pem: {{ print $key $crt | b64enc | quote }}
ca.crt: {{ .Values.certs.ca.crt | b64enc | quote }}
{{- end }}


@ -0,0 +1,46 @@
{{- if and .Values.scc.create (.Capabilities.APIVersions.Has "security.openshift.io/v1") }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
annotations:
kubernetes.io/description: |
      This provides the minimum requirements for Falco to run in OpenShift.
name: {{ template "falco.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
allowHostDirVolumePlugin: true
allowHostIPC: false
allowHostNetwork: true
allowHostPID: true
allowHostPorts: false
allowPrivilegeEscalation: true
allowPrivilegedContainer: true
allowedCapabilities: []
allowedUnsafeSysctls: []
defaultAddCapabilities: []
fsGroup:
type: RunAsAny
groups: []
priority: 0
readOnlyRootFilesystem: false
requiredDropCapabilities: []
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
seccompProfiles:
- '*'
supplementalGroups:
type: RunAsAny
users:
- system:serviceaccount:{{ .Release.Namespace }}:{{ template "falco.serviceAccountName" .}}
volumes:
- hostPath
- emptyDir
- secret
- configMap
{{- end }}


@ -0,0 +1,44 @@
{{- if .Values.auditLog.enabled }}
kind: Service
apiVersion: v1
metadata:
name: {{ template "falco.fullname" .}}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
{{- if .Values.falco.webserver.nodePort }}
type: NodePort
{{- end }}
selector:
app: {{ template "falco.fullname" .}}
ports:
- protocol: TCP
port: {{ .Values.falco.webserver.listenPort }}
{{- with .Values.falco.webserver.nodePort }}
nodePort: {{ . }}
{{- end }}
{{- end }}
{{- if and .Values.falco.grpc.enabled (not .Values.falco.grpc.unixSocketPath)}}
---
kind: Service
apiVersion: v1
metadata:
name: {{ template "falco.fullname" .}}-grpc
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
clusterIP: None
selector:
app: {{ template "falco.fullname" .}}
ports:
- protocol: TCP
port: {{ .Values.falco.grpc.listenPort }}
{{- end }}


@ -0,0 +1,16 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "falco.serviceAccountName" .}}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "falco.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
{{- if .Values.serviceAccount.annotations }}
annotations:
{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
{{- end }}
{{- end }}


@ -0,0 +1,455 @@
# Default values for Falco.
image:
registry: docker.io
repository: falcosecurity/falco
tag: 0.31.1
pullPolicy: IfNotPresent
pullSecrets: []
docker:
enabled: true
socket: /var/run/docker.sock
containerd:
enabled: true
socket: /run/containerd/containerd.sock
crio:
enabled: true
socket: /run/crio/crio.sock
kubernetesSupport:
  # Enables Kubernetes metadata collection via a connection to the Kubernetes API server.
enabled: true
  # The apiAuth value provides the authentication method Falco should use to connect to the Kubernetes API.
# The argument's documentation from Falco is provided here for reference:
#
# <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>], --k8s-api-cert <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>]
# Use the provided files names to authenticate user and (optionally) verify the K8S API server identity.
# Each entry must specify full (absolute, or relative to the current directory) path to the respective file.
# Private key password is optional (needed only if key is password protected).
# CA certificate is optional. For all files, only PEM file format is supported.
# Specifying CA certificate only is obsoleted - when single entry is provided
# for this option, it will be interpreted as the name of a file containing bearer token.
# Note that the format of this command-line option prohibits use of files whose names contain
# ':' or '#' characters in the file name.
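  # For example, a cert-based entry following the format above might look like
  # this (a hypothetical sketch, not the chart default):
  # apiAuth: /etc/falco/certs/client.crt:/etc/falco/certs/client.key:/etc/falco/certs/ca.crt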
apiAuth: /var/run/secrets/kubernetes.io/serviceaccount/token
apiUrl: "https://$(KUBERNETES_SERVICE_HOST)"
  # If true, only the current node (on which Falco is running) will be considered when requesting pod metadata
  # from the API server. Disabling this option may have a performance penalty on large clusters.
enableNodeFilter: true
resources:
  # Although the resources needed depend on the actual workload, we provide
  # sane defaults. If you have more questions or concerns, please ask in the
  # #falco Slack channel
requests:
cpu: 100m
memory: 512Mi
limits:
cpu: 1000m
memory: 1024Mi
extraArgs: []
nodeSelector: {}
affinity: {}
rbac:
# Create and use rbac resources
create: true
podSecurityPolicy:
# Create a podSecurityPolicy
create: false
serviceAccount:
# Create and use serviceAccount resources
create: true
# Use this value as serviceAccountName
name:
annotations: {}
fakeEventGenerator:
enabled: false
args:
- run
- --loop
- ^syscall
replicas: 1
daemonset:
# Perform rolling updates by default in the DaemonSet agent
# ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
updateStrategy:
# You can also customize maxUnavailable or minReadySeconds if you
# need it
type: RollingUpdate
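    # For example, to limit updates to one unavailable pod at a time (a sketch):
    # rollingUpdate:
    #   maxUnavailable: 1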
  ## Extra environment variables that will be passed to the daemonset pods
env: {}
  ## Add additional pod annotations on pods created by the DaemonSet
podAnnotations: {}
# Additional labels to add to the pods:
# podLabels:
# key: value
podLabels: {}
# If Falco is behind a proxy, you can set the proxy server here
proxy:
httpProxy:
httpsProxy:
noProxy:
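  # For example (hypothetical values):
  # httpProxy: http://proxy.example.com:3128
  # noProxy: localhost,127.0.0.1,.svc,.cluster.local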
# Set daemonset timezone
timezone:
# Set daemonset priorityClassName
priorityClassName:
ebpf:
# Enable eBPF support for Falco
enabled: false
path:
settings:
# Needed to enable eBPF JIT at runtime for performance reasons.
# Can be skipped if eBPF JIT is enabled from outside the container
hostNetwork: true
leastPrivileged:
# Constrain Falco with capabilities instead of running a privileged container.
# This option is only supported with the eBPF driver and a kernel >= 5.8.
# Ensure the eBPF driver is enabled (i.e., setting the `ebpf.enabled` option to true).
enabled: false
auditLog:
# true here activates the K8s Audit Log feature for Falco
enabled: false
dynamicBackend:
    # true here configures an AuditSink that will receive the K8s audit logs
enabled: false
    # Define whether the AuditSink client config should point to a fixed URL
    # instead of the default webserver service
url: ""
falco:
# The location of the rules file(s). This can contain one or more paths to
# separate rules files.
rulesFile:
- /etc/falco/falco_rules.yaml
- /etc/falco/falco_rules.local.yaml
- /etc/falco/k8s_audit_rules.yaml
# - /etc/falco/aws_cloudtrail_rules.yaml
- /etc/falco/rules.d
# - /etc/falco/rules.optional.d
plugins:
- name: cloudtrail
library_path: libcloudtrail.so
init_config: ""
open_params: ""
- name: json
library_path: libjson.so
init_config: ""
# Setting this list to empty ensures that the above plugins are *not*
# loaded and enabled by default. If you want to use the above plugins,
# set a meaningful init_config/open_params for the cloudtrail plugin
# and then change this to:
# load_plugins: [cloudtrail, json]
loadPlugins: []
# If true, the times displayed in log messages and output messages
# will be in ISO 8601. By default, times are displayed in the local
# time zone, as governed by /etc/localtime.
timeFormatISO8601: false
# Whether to output events in json or text
jsonOutput: false
# When using json output, whether or not to include the "output" property
# itself (e.g. "File below a known binary directory opened for writing
# (user=root ....") in the json output.
jsonIncludeOutputProperty: true
# When using json output, whether or not to include the "tags" property
# itself in the json output. If set to true, outputs caused by rules
# with no tags will have a "tags" field set to an empty array. If set to
# false, the "tags" field will not be included in the json output at all.
jsonIncludeTagsProperty: true
  # Send information logs to stderr and/or syslog. Note these are *not* security
# notification logs! These are just Falco lifecycle (and possibly error) logs.
logStderr: true
logSyslog: true
# Minimum log level to include in logs. Note: these levels are
# separate from the priority field of rules. This refers only to the
# log level of Falco's internal logging. Can be one of "emergency",
# "alert", "critical", "error", "warning", "notice", "info", "debug".
logLevel: info
# Minimum rule priority level to load and run. All rules having a
# priority more severe than this level will be loaded/run. Can be one
# of "emergency", "alert", "critical", "error", "warning", "notice",
# "informational", "debug".
priority: debug
# Whether or not output to any of the output channels below is
# buffered.
bufferedOutputs: false
# Falco uses a shared buffer between the kernel and userspace to pass
# system call information. When Falco detects that this buffer is
# full and system calls have been dropped, it can take one or more of
# the following actions:
# - ignore: do nothing (default when list of actions is empty)
# - log: log a DEBUG message noting that the buffer was full
# - alert: emit a Falco alert noting that the buffer was full
# - exit: exit Falco with a non-zero rc
#
# Notice it is not possible to ignore and log/alert messages at the same time.
#
# The rate at which log/alert messages are emitted is governed by a
# token bucket. The rate corresponds to one message every 30 seconds
# with a burst of one message (by default).
#
# The messages are emitted when the percentage of dropped system calls
# with respect the number of events in the last second
# is greater than the given threshold (a double in the range [0, 1]).
#
# For debugging/testing it is possible to simulate the drops using
# the `simulate_drops: true`. In this case the threshold does not apply.
syscallEventDrops:
threshold: .1
actions:
- log
- alert
rate: .03333
maxBurst: 1
# Falco uses a shared buffer between the kernel and userspace to receive
# the events (eg., system call information) in userspace.
#
  # However, the underlying libraries can also time out for various reasons.
# For example, there could have been issues while reading an event.
# Or the particular event needs to be skipped.
# Normally, it's very unlikely that Falco does not receive events consecutively.
#
  # Falco is able to detect such an uncommon situation.
#
# Here you can configure the maximum number of consecutive timeouts without an event
# after which you want Falco to alert.
# By default this value is set to 1000 consecutive timeouts without an event at all.
# How this value maps to a time interval depends on the CPU frequency.
syscallEventTimeouts:
maxConsecutives: 1000
  # Falco continuously monitors output performance. When an output channel does not
  # allow an alert to be delivered within a given deadline, an error is reported indicating
# which output is blocking notifications.
# The timeout error will be reported to the log according to the above log_* settings.
# Note that the notification will not be discarded from the output queue; thus,
# output channels may indefinitely remain blocked.
  # An output timeout error indeed indicates a misconfiguration issue or I/O problems
# that cannot be recovered by Falco and should be fixed by the user.
#
# The "output_timeout" value specifies the duration in milliseconds to wait before
  # considering the deadline exceeded.
#
# With a 2000ms default, the notification consumer can block the Falco output
# for up to 2 seconds without reaching the timeout.
output_timeout: 2000
# A throttling mechanism implemented as a token bucket limits the
# rate of Falco notifications. This throttling is controlled by the following configuration
# options:
# - rate: the number of tokens (i.e. right to send a notification)
# gained per second. Defaults to 1.
# - max_burst: the maximum number of tokens outstanding. Defaults to 1000.
#
# With these defaults, Falco could send up to 1000 notifications after
# an initial quiet period, and then up to 1 notification per second
# afterward. It would gain the full burst back after 1000 seconds of
# no activity.
outputs:
rate: 1
maxBurst: 1000
# Where security notifications should go.
# Multiple outputs can be enabled.
syslogOutput:
enabled: true
# If keep_alive is set to true, the file will be opened once and
# continuously written to, with each output message on its own
# line. If keep_alive is set to false, the file will be re-opened
# for each output message.
#
# Also, the file will be closed and reopened if Falco is signaled with
# SIGUSR1.
fileOutput:
enabled: false
keepAlive: false
filename: ./events.txt
stdoutOutput:
enabled: true
# Falco contains an embedded webserver that can be used to accept K8s
# Audit Events. These config options control the behavior of that
# webserver. (By default, the webserver is enabled).
webserver:
enabled: true
listenPort: 8765
nodePort: false
k8sAuditEndpoint: /k8s-audit
k8sHealthzEndpoint: /healthz
sslEnabled: false
sslCertificate: /etc/falco/certs/server.pem
livenessProbe:
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 15
readinessProbe:
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 15
# Possible additional things you might want to do with program output:
# - send to a slack webhook:
# program: "\"jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX\""
# - logging (alternate method than syslog):
# program: logger -t falco-test
# - send over a network connection:
# program: nc host.example.com 80
# If keep_alive is set to true, the program will be started once and
# continuously written to, with each output message on its own
# line. If keep_alive is set to false, the program will be re-spawned
# for each output message.
#
# Also, the program will be closed and reopened if Falco is signaled with
# SIGUSR1.
programOutput:
enabled: false
keepAlive: false
program: mail -s "Falco Notification" someone@example.com
# program: |
# jq 'if .priority == "Emergency" or .priority == "Critical" or .priority == "Error" then
# { attachments: [{ text: .output, color: "danger" }]}
# elif .priority == "Warning" or .priority == "Notice" then
# { attachments: [{ text: .output, color: "warning" }]}
# elif .priority == "Informational" then
# { attachments: [{ text: .output, color: "good" }]}
# else
# { attachments: [{ text: .output }]}
# end' | curl -d @- -X POST https://hooks.slack.com/services/xxxxxxxxx/xxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxx
httpOutput:
enabled: false
# When set, this will override an auto-generated URL which matches the falcosidekick Service.
# When including Falco inside a parent helm chart, you must set this since the auto-generated URL won't match (#280).
url: ""
userAgent: "falcosecurity/falco"
# Falco supports running a gRPC server with two main binding types
# 1. Over the network with mandatory mutual TLS authentication (mTLS)
# 2. Over a local unix socket with no authentication
# By default, the gRPC server is disabled, with no enabled services (see grpc_output)
  # please comment/uncomment and change the options below accordingly to configure it.
  # Important note: if Falco has any trouble creating the gRPC server,
  # this information will be logged, but the main Falco daemon will not be stopped.
# gRPC server over network with (mandatory) mutual TLS configuration.
# This gRPC server is secure by default so you need to generate certificates and update their paths here.
# By default the gRPC server is off.
# You can configure the address to bind and expose it.
# By modifying the threadiness configuration you can fine-tune the number of threads (and context) it will use.
grpc:
enabled: false
threadiness: 0
# gRPC unix socket with no authentication
unixSocketPath: "unix:///var/run/falco/falco.sock"
# gRPC over the network (mTLS) / required when unixSocketPath is empty
listenPort: 5060
privateKey: "/etc/falco/certs/server.key"
certChain: "/etc/falco/certs/server.crt"
rootCerts: "/etc/falco/certs/ca.crt"
# gRPC output service.
# By default it is off.
# By enabling this all the output events will be kept in memory until you read them with a gRPC client.
# Make sure to have a consumer for them or leave this disabled.
grpcOutput:
enabled: false
# Container orchestrator metadata fetching params
metadataDownload:
maxMb: 100
chunkWaitUs: 1000
watchFreqSec: 1
customRules:
{}
# Although Falco comes with a nice default rule set for detecting weird
# behavior in containers, our users are going to customize the run-time
# security rule sets or policies for the specific container images and
# applications they run. Such customizations can be added in this section.
#
# Example:
#
# rules-traefik.yaml: |-
# [ rule body ]
# certificates used by webserver and grpc server
# paste certificate content or use helm with --set-file
# or use existing secret containing key, crt, ca as well as pem bundle
certs:
existingSecret: ""
server:
key: ""
crt: ""
ca:
crt: ""
# Allow Falco to run on Kubernetes 1.6 masters.
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
scc:
  # true here enables creation of Security Context Constraints in OpenShift
create: true
# Add initContainers to Falco pod
extraInitContainers: []
# Add extra volumes to Falco daemonset
extraVolumes: []
# - name: optional-rules-volume
# configMap:
# name: falco-rules-optional
# optional: true
# items:
# - key: falco_rules.optional.yaml
# path: falco_rules.optional.yaml
# Add extra volumeMounts to Falco container in Falco daemonset
extraVolumeMounts: []
# - mountPath: /etc/falco/rules.optional.d
# name: optional-rules-volume
falcosidekick:
# enable falcosidekick deployment
enabled: false
fullfqdn: false
# for configuration values, see https://github.com/falcosecurity/charts/blob/master/falcosidekick/values.yaml


@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@ -0,0 +1,27 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Update fluent-bit image to 1.9.4."
apiVersion: v1
appVersion: 1.9.4
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
  family operating systems.
home: https://fluentbit.io/
icon: https://fluentbit.io/assets/img/logo1-default.png
keywords:
- logging
- fluent-bit
- fluentd
maintainers:
- email: eduardo@calyptia.com
name: edsiper
- email: naseem@transit.app
name: naseemkullah
- email: towmeykaw@gmail.com
name: Towmeykaw
- email: steve.hipwell@gmail.com
name: stevehipwell
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.20.2


@ -0,0 +1,57 @@
# Fluent Bit Helm chart
[Fluent Bit](https://fluentbit.io) is a fast and lightweight log processor and forwarder for Linux, OSX and BSD family operating systems.
## Installation
To add the `fluent` helm repo, run:
```sh
helm repo add fluent https://fluent.github.io/helm-charts
```
To install a release named `fluent-bit`, run:
```sh
helm install fluent-bit fluent/fluent-bit
```
## Chart values
```sh
helm show values fluent/fluent-bit
```
## Using Lua scripts
Fluent Bit allows us to build filters to modify the incoming records using custom [Lua scripts.](https://docs.fluentbit.io/manual/pipeline/filters/lua)
### How to use Lua scripts with this Chart
First, you should add your Lua scripts to `luaScripts` in values.yaml, for example:
```yaml
luaScripts:
filter_example.lua: |
function filter_name(tag, timestamp, record)
-- put your lua code here.
end
```
After that, the Lua scripts will be ready to be used as filters. The next step is to add your Fluent Bit [filter](https://docs.fluentbit.io/manual/concepts/data-pipeline/filter) to `config.filters` in values.yaml, for example:
```yaml
config:
filters: |
[FILTER]
Name lua
Match <your-tag>
script /fluent-bit/scripts/filter_example.lua
call filter_name
```
Under the hood, the chart will:
- Create a configmap using `luaScripts`.
- Add a volumeMount for each Lua script using the path `/fluent-bit/scripts/<script>`.
- Add the Lua scripts' configmap as a volume to the pod.
### Note
Remember to set the `script` attribute in the filter using `/fluent-bit/scripts/`, otherwise the file will not be found by Fluent Bit.
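
For reference, the chart renders each script into the pod roughly like this (a sketch of the resulting mount; the volume name is an assumption and may differ from the chart's actual naming):

```yaml
# sketch of the rendered container spec (names are illustrative)
volumeMounts:
  - name: luascripts            # volume backed by the luaScripts configmap
    mountPath: /fluent-bit/scripts/filter_example.lua
    subPath: filter_example.lua
```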


@ -0,0 +1 @@
logLevel: debug

File diff suppressed because it is too large


@ -0,0 +1,6 @@
Get Fluent Bit build information by running these commands:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "fluent-bit.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 2020:2020
curl http://127.0.0.1:2020

View File

@ -0,0 +1,106 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "fluent-bit.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "fluent-bit.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "fluent-bit.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "fluent-bit.labels" -}}
helm.sh/chart: {{ include "fluent-bit.chart" . }}
{{ include "fluent-bit.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "fluent-bit.selectorLabels" -}}
app.kubernetes.io/name: {{ include "fluent-bit.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "fluent-bit.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "fluent-bit.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Ingress apiVersion according to k8s version
*/}}
{{- define "fluent-bit.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1
{{- else if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") (semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1beta1
{{- else -}}
extensions/v1beta1
{{- end }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "fluent-bit.ingress.isStable" -}}
{{- eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports ingressClassName.
*/}}
{{- define "fluent-bit.ingress.supportsIngressClassName" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "fluent-bit.ingress.supportsPathType" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
PDB apiVersion according to k8s version and capabilities
*/}}
{{- define "fluent-bit.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion) -}}
policy/v1
{{- else -}}
policy/v1beta1
{{- end }}
{{- end -}}

View File

@ -0,0 +1,133 @@
{{- define "fluent-bit.pod" -}}
serviceAccountName: {{ include "fluent-bit.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 2 }}
{{- end }}
hostNetwork: {{ .Values.hostNetwork }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.hostAliases }}
hostAliases:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 2 }}
{{- else }}
{{- toYaml . | nindent 2 }}
{{- end -}}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or .Values.env .Values.envWithTpl }}
env:
{{- with .Values.env }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- range $item := .Values.envWithTpl }}
- name: {{ $item.name }}
value: {{ tpl $item.value $ | quote }}
{{- end }}
{{- end }}
{{- if .Values.envFrom }}
envFrom:
{{- toYaml .Values.envFrom | nindent 6 }}
{{- end }}
{{- if .Values.args }}
args:
{{- toYaml .Values.args | nindent 6 }}
{{- end}}
{{- if .Values.command }}
command:
{{- toYaml .Values.command | nindent 6 }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.metricsPort }}
protocol: TCP
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
containerPort: {{ .containerPort }}
protocol: {{ .protocol }}
{{- end }}
{{- end }}
{{- with .Values.lifecycle }}
lifecycle:
{{- toYaml . | nindent 6 }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 6 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 6 }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
volumeMounts:
{{- toYaml .Values.volumeMounts | nindent 6 }}
{{- range $key, $val := .Values.config.extraFiles }}
- name: config
mountPath: /fluent-bit/etc/{{ $key }}
subPath: {{ $key }}
{{- end }}
{{- range $key, $value := .Values.luaScripts }}
- name: luascripts
mountPath: /fluent-bit/scripts/{{ $key }}
subPath: {{ $key }}
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.extraContainers }}
{{- toYaml .Values.extraContainers | nindent 2 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ if .Values.existingConfigMap }}{{ .Values.existingConfigMap }}{{- else }}{{ include "fluent-bit.fullname" . }}{{- end }}
{{- if gt (len .Values.luaScripts) 0 }}
- name: luascripts
configMap:
name: {{ include "fluent-bit.fullname" . }}-luascripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumes | nindent 2 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 2 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,42 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
{{- if .Values.rbac.nodeAccess }}
- nodes
- nodes/proxy
{{- end }}
verbs:
- get
- list
- watch
{{- if .Values.podSecurityPolicy.create }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ include "fluent-bit.fullname" . }}
verbs:
- use
{{- end }}
{{- if and .Values.openShift.enabled .Values.openShift.securityContextConstraints.create }}
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- {{ include "fluent-bit.fullname" . }}
verbs:
- use
{{- end }}
{{- end -}}

View File

@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "fluent-bit.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "fluent-bit.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@ -0,0 +1,23 @@
{{- if .Values.dashboards.enabled -}}
{{- range $path, $_ := .Files.Glob "dashboards/*.json" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" $ }}-dashboard-{{ trimSuffix ".json" (base $path) }}
{{- with $.Values.dashboards.namespace }}
namespace: {{ . }}
{{- end }}
{{- with $.Values.dashboards.annotations }}
annotations:
{{- toYaml . | nindent 4 -}}
{{- end }}
labels:
{{- include "fluent-bit.labels" $ | nindent 4 }}
{{ $.Values.dashboards.labelKey }}: "1"
data:
{{ base $path }}: |
{{- tpl ($.Files.Get $path) $ | nindent 4 }}
---
{{- end }}
{{- end -}}

View File

@ -0,0 +1,12 @@
{{- if gt (len .Values.luaScripts) 0 -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}-luascripts
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
{{ range $key, $value := .Values.luaScripts }}
{{ $key }}: {{ $value | quote }}
{{ end }}
{{- end -}}

View File

@ -0,0 +1,20 @@
{{- if (empty .Values.existingConfigMap) -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
custom_parsers.conf: |
{{- (tpl .Values.config.customParsers $) | nindent 4 }}
fluent-bit.conf: |
{{- (tpl .Values.config.service $) | nindent 4 }}
{{- (tpl .Values.config.inputs $) | nindent 4 }}
{{- (tpl .Values.config.filters $) | nindent 4 }}
{{- (tpl .Values.config.outputs $) | nindent 4 }}
{{- range $key, $val := .Values.config.extraFiles }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,38 @@
{{- if eq .Values.kind "DaemonSet" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,41 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
{{- with .Values.updateStrategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,39 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
spec:
{{- if .Values.autoscaling.behavior }}
behavior:
{{- toYaml .Values.autoscaling.behavior | nindent 4 }}
{{- end }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "fluent-bit.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.customRules -}}
{{- toYaml .Values.autoscaling.customRules | nindent 4}}
{{- end -}}
{{- end }}

View File

@ -0,0 +1,62 @@
{{- $ingressApiIsStable := eq (include "fluent-bit.ingress.isStable" .) "true" -}}
{{- $ingressSupportsIngressClassName := eq (include "fluent-bit.ingress.supportsIngressClassName" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "fluent-bit.ingress.supportsPathType" .) "true" -}}
{{- $fullName := include "fluent-bit.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and ( eq .Values.kind "Deployment" ) .Values.ingress.enabled }}
apiVersion: {{ include "fluent-bit.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
{{- with .secretName }}
secretName: {{ . }}
{{- end }}
{{- end }}
{{- end }}
rules:
{{- range concat .Values.ingress.hosts .Values.ingress.extraHosts }}
- host: {{ .host | quote }}
http:
paths:
- path: /
{{- if $ingressSupportsPathType }}
pathType: Prefix
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
{{- if .port }}
number: {{ .port }}
{{- else }}
number: {{ $svcPort }}
{{- end }}
{{- else }}
serviceName: {{ $fullName }}
{{- if .port }}
servicePort: {{ .port }}
{{- else }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: {{ include "fluent-bit.fullname" . | quote }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
spec:
policyTypes:
- "Ingress"
podSelector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
ingress:
{{- with .Values.networkPolicy.ingress }}
- from:
{{- with .from }}{{- . | toYaml | nindent 8 }}{{- else }} []{{- end }}
ports:
- protocol: "TCP"
port: {{ $.Values.service.port }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.podDisruptionBudget.enabled }}
apiVersion: {{ include "fluent-bit.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.podDisruptionBudget.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "fluent-bit.fullname" . }}
{{- with .Values.prometheusRule.namespace }}
namespace: {{ . }}
{{- end }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- if .Values.prometheusRule.additionalLabels }}
{{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }}
{{- end }}
spec:
{{- if .Values.prometheusRule.rules }}
groups:
- name: {{ template "fluent-bit.name" . }}
rules: {{- toYaml .Values.prometheusRule.rules | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,42 @@
{{- if .Values.podSecurityPolicy.create }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.podSecurityPolicy.annotations }}
annotations:
{{- toYaml .Values.podSecurityPolicy.annotations | nindent 4 }}
{{- end }}
spec:
privileged: false
# Required to prevent escalations to root.
allowPrivilegeEscalation: false
# This is redundant with non-root + disallow privilege escalation,
# but we can provide it for defense in depth.
requiredDropCapabilities:
- ALL
volumes:
- '*'
hostNetwork: {{ .Values.hostNetwork }}
hostIPC: false
hostPID: false
runAsUser:
# TODO: Require the container to run without root privileges.
rule: 'RunAsAny'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false
{{- end }}

View File

@ -0,0 +1,37 @@
{{- if and .Values.openShift.enabled .Values.openShift.securityContextConstraints.create }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.openShift.securityContextConstraints.annotations }}
annotations:
{{- toYaml .Values.openShift.securityContextConstraints.annotations | nindent 4 }}
{{- end }}
allowPrivilegedContainer: true
allowPrivilegeEscalation: true
allowHostDirVolumePlugin: true
defaultAllowPrivilegeEscalation: false
# forbid host namespaces
allowHostNetwork: false
allowHostIPC: false
allowHostPorts: false
allowHostPID: false
allowedCapabilities: []
forbiddenSysctls:
- "*"
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- emptyDir
- hostPath
- persistentVolumeClaim
- secret
{{- end }}

View File

@ -0,0 +1,39 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if and (eq .Values.service.type "NodePort") (.Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
targetPort: {{ .name }}
protocol: {{ .protocol }}
port: {{ .port }}
{{- if and (eq $.Values.service.type "NodePort") (.nodePort) }}
nodePort: {{ .nodePort }}
{{- end }}
{{- end }}
{{- end }}
selector:
{{- include "fluent-bit.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "fluent-bit.serviceAccountName" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,45 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "fluent-bit.fullname" . }}
{{- with .Values.serviceMonitor.namespace }}
namespace: {{ . }}
{{- end }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.selector }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.serviceMonitor.jobLabel }}
jobLabel: {{ .Values.serviceMonitor.jobLabel }}
{{- end }}
endpoints:
- port: http
path: /api/v1/metrics/prometheus
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 8 }}
{{- else }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "fluent-bit.fullname" . }}-test-connection"
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: "{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}"
imagePullPolicy: {{ .Values.testFramework.image.pullPolicy }}
command: ['wget']
args: ['{{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}']
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}
{{- end }}
restartPolicy: Never

View File

@ -0,0 +1,401 @@
# Default values for fluent-bit.
# kind -- DaemonSet or Deployment
kind: DaemonSet
# replicaCount -- Only applicable if kind=Deployment
replicaCount: 1
image:
repository: cr.fluentbit.io/fluent/fluent-bit
# Overrides the image tag whose default is {{ .Chart.AppVersion }}
tag: ""
pullPolicy: Always
testFramework:
image:
repository: busybox
pullPolicy: Always
tag: latest
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: true
annotations: {}
name:
rbac:
create: true
nodeAccess: false
podSecurityPolicy:
create: false
annotations: {}
openShift:
# Sets Openshift support
enabled: false
# Creates SCC for Fluent-bit when Openshift support is enabled
securityContextConstraints:
create: true
annotations: {}
podSecurityContext: {}
# fsGroup: 2000
hostNetwork: false
dnsPolicy: ClusterFirst
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "2"
# - name: edns0
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "foo.local"
# - "bar.local"
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 2020
labels: {}
# nodePort: 30020
# clusterIP: 172.16.10.1
annotations: {}
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
# prometheus.io/scrape: "true"
serviceMonitor:
enabled: false
# namespace: monitoring
# interval: 10s
# scrapeTimeout: 10s
# jobLabel: fluentbit
# selector:
# prometheus: my-prometheus
# ## metric relabel configs to apply to samples before ingestion.
# ##
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# ## relabel configs to apply to samples after ingestion.
# ##
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
prometheusRule:
enabled: false
# namespace: ""
# additionalLabels: {}
# rules:
# - alert: NoOutputBytesProcessed
# expr: rate(fluentbit_output_proc_bytes_total[5m]) == 0
# annotations:
# message: |
# Fluent Bit instance {{ $labels.instance }}'s output plugin {{ $labels.name }} has not processed any
# bytes for at least 15 minutes.
# summary: No Output Bytes Processed
# for: 15m
# labels:
# severity: critical
dashboards:
enabled: false
labelKey: grafana_dashboard
annotations: {}
namespace: ""
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "sleep 20"]
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /api/v1/health
port: http
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## only available if kind is Deployment
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts: []
# - host: fluent-bit.example.tld
extraHosts: []
# - host: fluent-bit-extra.example.tld
## specify extraPort number
# port: 5170
tls: []
# - secretName: fluent-bit-example-tld
# hosts:
# - fluent-bit.example.tld
## only available if kind is Deployment
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 75
# targetMemoryUtilizationPercentage: 75
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics
customRules: []
# - type: Pods
# pods:
# metric:
# name: packets-per-second
# target:
# type: AverageValue
# averageValue: 1k
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
behavior: {}
# scaleDown:
# policies:
# - type: Pods
# value: 4
# periodSeconds: 60
# - type: Percent
# value: 10
# periodSeconds: 60
## only available if kind is Deployment
podDisruptionBudget:
enabled: false
annotations: {}
maxUnavailable: "30%"
nodeSelector: {}
tolerations: []
affinity: {}
labels: {}
annotations: {}
podAnnotations: {}
podLabels: {}
priorityClassName: ""
env: []
# - name: FOO
# value: "bar"
# The envWithTpl array below has the same usage as "env", but is using the tpl function to support templatable string.
# This can be useful when you want to pass dynamic values to the Chart using the helm argument "--set <variable>=<value>"
# https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function
envWithTpl: []
# - name: FOO_2
# value: "{{ .Values.foo2 }}"
#
# foo2: bar2
envFrom: []
extraContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
flush: 1
metricsPort: 2020
extraPorts: []
# - port: 5170
# containerPort: 5170
# protocol: TCP
# name: tcp
# nodePort: 30517
extraVolumes: []
extraVolumeMounts: []
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxUnavailable: 1
# Make use of a pre-defined configmap instead of the one templated here
existingConfigMap: ""
networkPolicy:
enabled: false
# ingress:
# from: []
luaScripts: {}
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File parsers.conf
Parsers_File custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port {{ .Values.metricsPort }}
Health_Check On
## https://docs.fluentbit.io/manual/pipeline/inputs
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
multiline.parser docker, cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.*
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Read_From_Tail On
## https://docs.fluentbit.io/manual/pipeline/filters
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
## https://docs.fluentbit.io/manual/pipeline/outputs
outputs: |
[OUTPUT]
Name es
Match kube.*
Host elasticsearch-master
Logstash_Format On
Retry_Limit False
[OUTPUT]
Name es
Match host.*
Host elasticsearch-master
Logstash_Format On
Logstash_Prefix node
Retry_Limit False
## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: |
[PARSER]
Name docker_no_time
Format json
Time_Keep Off
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
# This allows adding more files with arbitrary filenames to /fluent-bit/etc by providing key/value pairs.
# The key becomes the filename, the value becomes the file content.
extraFiles: {}
# example.conf: |
# [OUTPUT]
# Name example
# Match foo.*
# Host bar
# The config volume is mounted by default, either to the existingConfigMap value, or the default of "fluent-bit.fullname"
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/fluent-bit.conf
subPath: fluent-bit.conf
- name: config
mountPath: /fluent-bit/etc/custom_parsers.conf
subPath: custom_parsers.conf
daemonSetVolumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: etcmachineid
hostPath:
path: /etc/machine-id
type: File
daemonSetVolumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
args: []
command: []
# This supports either a structured array or a templatable string
initContainers: []
# Array mode
# initContainers:
# - name: do-something
# image: bitnami/kubectl:1.22
# command: ['kubectl', 'version']
# String mode
# initContainers: |-
# - name: do-something
# image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
# command: ['kubectl', 'version']
logLevel: info

View File

@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# helm/charts
OWNERS
hack/
ci/
kube-prometheus-*.tgz

View File

@ -0,0 +1,12 @@
# Contributing Guidelines
## How to contribute to this chart
1. Fork this repository, develop and test your Chart.
1. Bump the chart version for every change.
1. Ensure PR title has the prefix `[kube-prometheus-stack]`
1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories
1. Check the `hack/minikube` folder, which has scripts to set up minikube and the components of this chart so that all components can be scraped. You can use this configuration when validating your changes.
1. Check for changes of RBAC rules.
1. Check for changes in CRD specs.
1. PR must pass the linter (`helm lint`)

View File

@ -0,0 +1,12 @@
dependencies:
- name: kube-state-metrics
repository: https://charts.helm.sh/stable
version: 2.9.4
- name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts
version: 1.12.0
- name: grafana
repository: https://grafana.github.io/helm-charts
version: 6.1.16
digest: sha256:501848912e007b99631a6cd03347c17f9b661c9645d571da9f633a85c095df31
generated: "2021-01-04T16:02:02.806098+01:00"

View File

@ -0,0 +1,50 @@
annotations:
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
- name: Upstream Project
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
apiVersion: v2
appVersion: 0.44.0
dependencies:
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: https://charts.helm.sh/stable
version: 2.9.*
- condition: nodeExporter.enabled
name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts
version: 1.12.*
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
version: 6.1.*
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
and Prometheus rules combined with documentation and scripts to provide easy to
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
Operator.
home: https://github.com/prometheus-operator/kube-prometheus
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
keywords:
- operator
- prometheus
- kube-prometheus
kubeVersion: '>=1.16.0-0'
maintainers:
- name: vsliouniaev
- name: bismarck
- email: gianrubio@gmail.com
name: gianrubio
- email: github.gkarthiks@gmail.com
name: gkarthiks
- email: scott@r6by.com
name: scottrigby
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
name: kube-prometheus-stack
sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 12.10.0

View File

@ -0,0 +1,396 @@
# kube-prometheus-stack
Installs the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).
See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) README for details about components, dashboards, and alerts.
_Note: This chart was formerly named `prometheus-operator`; it has been renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component._
## Prerequisites
- Kubernetes 1.16+
- Helm 3+
## Get Repo Info
```console
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo add stable https://charts.helm.sh/stable
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Install Chart
```console
# Helm
$ helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Dependencies
By default this chart installs additional, dependent charts:
- [stable/kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics)
- [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter)
- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana)
To disable dependencies during installation, see [multiple releases](#multiple-releases) below.
_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
## Uninstall Chart
```console
# Helm
$ helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
CRDs created by this chart are not removed by default and should be manually cleaned up:
```console
kubectl delete crd alertmanagerconfigs.monitoring.coreos.com
kubectl delete crd alertmanagers.monitoring.coreos.com
kubectl delete crd podmonitors.monitoring.coreos.com
kubectl delete crd probes.monitoring.coreos.com
kubectl delete crd prometheuses.monitoring.coreos.com
kubectl delete crd prometheusrules.monitoring.coreos.com
kubectl delete crd servicemonitors.monitoring.coreos.com
kubectl delete crd thanosrulers.monitoring.coreos.com
```
## Upgrading Chart
```console
# Helm
$ helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
With Helm v3, CRDs created by this chart are not updated by default and should be manually updated.
Consult also the [Helm Documentation on CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions).
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading an existing Release to a new major version
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 11.x to 12.x
The chart was migrated to support only helm v3 and later.
### From 10.x to 11.x
Version 11 upgrades prometheus-operator from 0.42.x to 0.43.x. Starting with 0.43.x an additional `AlertmanagerConfigs` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.43/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
```
Version 11 removes the deprecated tlsProxy via ghostunnel in favor of the native TLS support that prometheus-operator gained with v0.39.0.
### From 9.x to 10.x
Version 10 upgrades prometheus-operator from 0.38.x to 0.42.x. Starting with 0.40.x an additional `Probes` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.42/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
```
### From 8.x to 9.x
Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration.
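For example, a values sketch referencing a pre-created secret might look like this (the secret name and key are placeholders; verify the exact structure with `helm show values`):
```yaml
prometheus:
  prometheusSpec:
    additionalScrapeConfigsSecret:
      enabled: true
      name: my-additional-scrape-configs
      key: additional-scrape-configs.yaml
```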
### From 7.x to 8.x
Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x:
```console
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0
```
Then upgrade to 8.x.x:
```console
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x]
```
The minimal recommended Prometheus version for this chart release is `2.12.x`.
### From 6.x to 7.x
Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0.
### From 5.x to 6.x
Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. If this is not done an error will occur indicating that the deployment cannot be modified:
```console
invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
```
If this error has already been encountered, use `helm history` to determine which release last worked, then `helm rollback` to that release, and finally `helm upgrade --force` to this new one.
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
```console
helm show values prometheus-community/kube-prometheus-stack
```
You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options.
### Multiple releases
The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`.
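For example, a second Prometheus release with the dependencies disabled could be installed like this (the release name is a placeholder):
```console
helm install prometheus-two prometheus-community/kube-prometheus-stack \
  --set kubeStateMetrics.enabled=false \
  --set nodeExporter.enabled=false \
  --set grafana.enabled=false
```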
## Work-Arounds for Known Issues
### Running on private GKE clusters
When Google configures the control plane for private clusters, it automatically configures VPC peering between your Kubernetes cluster's network and a separate Google-managed project. In order to restrict what Google can access within your cluster, the configured firewall rules restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod.
You can read more about how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules).
Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`.
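For example, disabling the hooks at install time:
```console
helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack \
  --set prometheusOperator.admissionWebhooks.enabled=false
```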
## PrometheusRules Admission Webhooks
With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster.
### How the Chart Configures the Hooks
A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end-user certificates. If the certificate already exists, the hook exits.
2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set.
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations.
### Alternatives
It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested.
### Limitations
Because the operator can only run as a single pod, there is potential for this component failure to cause rule deployment failure. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default.
## Developing Prometheus Rules and Grafana Dashboards
The Grafana dashboards and Prometheus rules in this chart are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repo](https://github.com/prometheus-operator/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them there with the scripts.
## Further Information
For more in-depth documentation of configuration options meanings, please see
- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
- [Prometheus](https://prometheus.io/docs/introduction/overview/)
- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart)
## prometheus.io/scrape
The Prometheus Operator does not support annotation-based discovery of services; it uses the `PodMonitor` and `ServiceMonitor` CRDs in their place, as they provide far more configuration options.
For information on how to use PodMonitors/ServiceMonitors, please see the `prometheus-operator/prometheus-operator` documentation here:
- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-servicemonitors)
- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-podmonitors)
- [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md)
By default, Prometheus discovers PodMonitors and ServiceMonitors within its namespace that are labeled with the same release tag as the prometheus-operator release.
Sometimes, you may need to discover custom PodMonitors/ServiceMonitors, for example ones used to scrape data from third-party applications.
An easy way of doing this, without compromising the default PodMonitor/ServiceMonitor discovery, is to allow Prometheus to discover all PodMonitors/ServiceMonitors within its namespace, without applying any label filtering.
To do so, you can set `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` and `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.
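In values form, that looks like:
```yaml
prometheus:
  prometheusSpec:
    podMonitorSelectorNilUsesHelmValues: false
    serviceMonitorSelectorNilUsesHelmValues: false
```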
## Migrating from stable/prometheus-operator chart
## Zero downtime
Since `kube-prometheus-stack` is fully compatible with the `stable/prometheus-operator` chart, a migration without downtime can be achieved.
However, the old name prefix needs to be kept. If you want the new name, please follow the step-by-step guide below (with downtime).
You can override the name to achieve this:
```console
helm upgrade prometheus-operator prometheus-community/kube-prometheus-stack -n monitoring --reuse-values --set nameOverride=prometheus-operator
```
**Note**: It is recommended to run this first with `--dry-run --debug`.
## Redeploy with new name (downtime)
If the **prometheus-operator** values are compatible with the new **kube-prometheus-stack** chart, please follow the below steps for migration:
> The guide presumes that the chart is deployed in the `monitoring` namespace and the deployments are running there. If it is deployed in another namespace, please replace `monitoring` with the deployed namespace.
1. Patch the PersistentVolume created/used by the prometheus-operator chart to the `Retain` reclaim policy:
```console
kubectl patch pv/<PersistentVolume name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
```
**Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
2. Uninstall the **prometheus-operator** release, delete the existing PersistentVolumeClaim, and verify that the PV becomes Released.
```console
helm uninstall prometheus-operator -n monitoring
kubectl delete pvc/<PersistentVolumeClaim name> -n monitoring
```
Additionally, you have to manually remove the remaining `prometheus-operator-kubelet` service.
```console
kubectl delete service/prometheus-operator-kubelet -n kube-system
```
You can choose to remove all your existing CRDs (ServiceMonitors, PodMonitors, etc.) if you want to.
3. Remove current `spec.claimRef` values to change the PV's status from Released to Available.
```console
kubectl patch pv/<PersistentVolume name> --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]' -n monitoring
```
**Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
After these steps, proceed with a fresh **kube-prometheus-stack** installation and make sure the current release of **kube-prometheus-stack** matches the `volumeClaimTemplate` values in `values.yaml`.
The binding is done by matching the specific amount of storage requested and certain access modes.
For example, if you had storage specified as this with **prometheus-operator**:
```yaml
volumeClaimTemplate:
spec:
storageClassName: gp2
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi
```
You have to specify matching `volumeClaimTemplate` with 50Gi storage and `ReadWriteOnce` access mode.
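For example, assuming the chart exposes this under `prometheus.prometheusSpec.storageSpec` (a sketch; verify against your chart version):
```yaml
prometheus:
  prometheusSpec:
    storageSpec:
      volumeClaimTemplate:
        spec:
          storageClassName: gp2
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 50Gi
```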
Additionally, you should check the current AZ of your legacy installation's PV and configure the fresh release to use the same AZ as the old one. If the pods are in a different AZ than the PV, the release will fail to bind the existing PV and will create a new one instead.
This can be achieved either by specifying the labels through `values.yaml`, e.g. setting `prometheus.prometheusSpec.nodeSelector` to:
```yaml
nodeSelector:
failure-domain.beta.kubernetes.io/zone: east-west-1a
```
or passing these values as `--set` overrides during installation.
The new release should now re-attach your previously released PV with its content.
## Migrating from coreos/prometheus-operator chart
The multiple charts have been combined into a single chart that installs Prometheus Operator, Prometheus, Alertmanager, and Grafana, as well as the multitude of exporters necessary to monitor a cluster.
There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support.
The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster - you will need to disable the parts of the chart you do not wish to deploy.
You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765).
### High-level overview of Changes
#### Added dependencies
The chart has added 3 [dependencies](#dependencies).
- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart, and are relatively simple components
- Grafana: The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/helm/charts/tree/master/stable/grafana)
#### Kubelet Service
Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice.
#### Persistent Volumes
If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created:
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pvc-prometheus-migration-prometheus-0
spec:
accessModes:
- ReadWriteOnce
azureDisk:
cachingMode: None
diskName: pvc-prometheus-migration-prometheus-0
diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0
fsType: ""
kind: Managed
readOnly: false
capacity:
storage: 1Gi
persistentVolumeReclaimPolicy: Delete
storageClassName: prometheus
volumeMode: Filesystem
```
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app: prometheus
prometheus: prometheus-migration-prometheus
name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
namespace: monitoring
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: prometheus
volumeMode: Filesystem
volumeName: pvc-prometheus-migration-prometheus-0
```
The PVC will take ownership of the PV, and when you create a release using a persistent volume claim template, it will use the existing PVCs, as they match the naming convention used by the chart. Similar approaches can be used for other cloud providers.
#### KubeProxy
The metrics bind address of kube-proxy defaults to `127.0.0.1:10249`, which Prometheus instances **cannot** access. You should expose the metrics by changing the `metricsBindAddress` field value to `0.0.0.0:10249` if you want to collect them.
Depending on the cluster, the relevant `config.conf` section will be in the ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example:
```console
kubectl -n kube-system edit cm kube-proxy
```
```yaml
apiVersion: v1
data:
config.conf: |-
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# ...
# metricsBindAddress: 127.0.0.1:10249
metricsBindAddress: 0.0.0.0:10249
# ...
kubeconfig.conf: |-
# ...
kind: ConfigMap
metadata:
labels:
app: kube-proxy
name: kube-proxy
namespace: kube-system
```

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.vscode
.project
.idea/
*.tmproj
OWNERS

View File

@ -0,0 +1,22 @@
apiVersion: v2
appVersion: 7.3.5
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
kubeVersion: ^1.8.0-0
maintainers:
- email: zanhsieh@gmail.com
name: zanhsieh
- email: rluckie@cisco.com
name: rtluckie
- email: maor.friedman@redhat.com
name: maorfr
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
- email: mail@torstenwalter.de
name: torstenwalter
name: grafana
sources:
- https://github.com/grafana/grafana
type: application
version: 6.1.16

View File

@ -0,0 +1,523 @@
# Grafana Helm Chart
* Installs the web dashboarding system [Grafana](http://grafana.org/)
## Get Repo Info
```console
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install my-release grafana/grafana
```
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Upgrading an existing Release to a new major version
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
incompatible breaking change needing manual actions.
### To 4.0.0 (And 3.12.1)
This version requires Helm >= 2.12.0.
### To 5.0.0
You have to add `--force` to your helm upgrade command, as the labels of the chart have changed.
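A sketch, reusing the release name from the install example above:
```console
helm upgrade my-release grafana/grafana --force
```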
### To 6.0.0
This version requires Helm >= 3.1.0.
## Configuration
| Parameter | Description | Default |
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `replicas` | Number of nodes | `1` |
| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` |
| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` |
| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` |
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`|
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Image tag (`Must be >= 5.0.0`) | `7.0.3` |
| `image.sha` | Image sha (optional) | `17cbd08b9515fda889ca959e9d72ee6f3327c8f1844a3336dfd952134f38e2fe` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets | `{}` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed | `80` |
| `service.portName` | Name of the port on the service | `service` |
| `service.targetPort` | Internal service port | `3000` |
| `service.nodePort` | Kubernetes service nodePort | `nil` |
| `service.annotations` | Service annotations | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.clusterIP` | internal cluster service IP | `nil` |
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` |
| `service.externalIPs` | service external IP addresses | `[]` |
| `extraExposePorts` | Additional service ports for sidecar containers| `[]` |
| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations (values are templated) | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress accepted path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` |
| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` |
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` |
| `initChownData.enabled` | If false, don't reset data ownership at startup | `true` |
| `initChownData.image.repository` | init-chown-data container image repository | `busybox` |
| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` |
| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` |
| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` |
| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` |
| `env` | Extra environment variables passed to pods | `{}` |
| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` |
| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
| `envRenderSecret` | Sensitive environment variables passed to pods and stored as secret | `{}` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
| `notifiers` | Configure grafana notifiers | `{}` |
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
| `dashboards` | Dashboards to import | `{}` |
| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
| `grafana.ini` | Grafana's primary configuration | `{}` |
| `ldap.enabled` | Enable LDAP authentication | `false` |
| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
| `ldap.config` | Grafana's LDAP configuration | `""` |
| `annotations` | Deployment annotations | `{}` |
| `labels` | Deployment labels | `{}` |
| `podAnnotations` | Pod annotations | `{}` |
| `podLabels` | Pod labels | `{}` |
| `podPortName` | Name of the grafana port on the pod | `grafana` |
| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` |
| `sidecar.image.tag` | Sidecar image tag | `1.1.0` |
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
| `sidecar.resources` | Sidecar resources | `{}` |
| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` |
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
| `sidecar.dashboards.provider.orgid` | Id of the organisation to which the dashboards should be added | `1` |
| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
| `sidecar.dashboards.provider.type` | Provider type | `file` |
| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` |
| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` |
| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
| `serviceAccount.annotations` | ServiceAccount annotations | |
| `serviceAccount.create` | Create service account | `true` |
| `serviceAccount.name` | Service account name to use; when empty it is set to the created account if `serviceAccount.create` is set, otherwise to `default` | `` |
| `serviceAccount.nameTest` | Service account name to use for the test; when empty it is set to the created account if `serviceAccount.create` is set, otherwise to `default` | `nil` |
| `rbac.create` | Create and use RBAC resources | `true` |
| `rbac.namespaced` | Creates a Role and RoleBinding instead of the default ClusterRole and ClusterRoleBinding for the grafana instance | `false` |
| `rbac.useExistingRole` | Set to the name of an existing role to skip role creation, while still creating the service account and binding it to the named role | `nil` |
| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` |
| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` |
| `rbac.extraRoleRules` | Additional rules to add to the Role | `[]` |
| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | `[]` |
| `command` | Define command to be executed by grafana container at startup | `nil` |
| `testFramework.enabled` | Whether to create test-related resources | `true` |
| `testFramework.image` | `test-framework` image repository. | `bats/bats` |
| `testFramework.tag` | `test-framework` image tag. | `v1.1.0` |
| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` |
| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` |
| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` |
| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` |
| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` |
| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` |
| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) |
| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` |
| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | |
| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` |
| `serviceMonitor.path` | Path to scrape | `/metrics` |
| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` |
| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` |
| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` |
| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` |
| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` |
| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` |
| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` |
| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` |
| `imageRenderer.image.tag` | image-renderer Image tag | `latest` |
| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` |
| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` |
| `imageRenderer.env` | extra env-vars for image-renderer | `{}` |
| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` |
| `imageRenderer.service.portName` | image-renderer service port name | `'http'` |
| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` |
| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` |
| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` |
| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` |
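As a minimal sketch (the hostname and sizes below are illustrative, not recommendations), a `values.yaml` overriding a few of the parameters above could look like:
```yaml
replicas: 2
persistence:
  enabled: true
  size: 10Gi
ingress:
  enabled: true
  hosts:
    - grafana.example.com
resources:
  requests:
    cpu: 100m
    memory: 128Mi
```
Apply it with `helm upgrade --install my-release grafana/grafana -f values.yaml`.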
### Example ingress with path
With Grafana 6.3 and above:
```yaml
grafana.ini:
server:
domain: monitoring.example.com
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
ingress:
enabled: true
hosts:
- "monitoring.example.com"
path: "/grafana"
```
### Example of extraVolumeMounts
```yaml
extraVolumeMounts:
  - name: plugins
    mountPath: /var/lib/grafana/plugins
    subPath: configs/grafana/plugins
    existingClaim: existing-grafana-claim
    readOnly: false
```
## Import dashboards
There are a few methods to import dashboards into Grafana. Below are some examples and explanations of how to use each method:
```yaml
dashboards:
default:
some-dashboard:
json: |
{
"annotations":
...
# Complete json file here
...
"title": "Some Dashboard",
"uid": "abcd1234",
"version": 1
}
custom-dashboard:
# This is a path to a file inside the dashboards directory inside the chart directory
file: dashboards/custom-dashboard.json
prometheus-stats:
# Ref: https://grafana.com/dashboards/2
gnetId: 2
revision: 2
datasource: Prometheus
local-dashboard:
url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
```
## BASE64 dashboards
Dashboards can be stored on a server that does not return JSON directly but instead returns a Base64-encoded file (e.g. Gerrit). A new parameter has been added for the `url` use case: if you specify `b64content: true` alongside the `url` entry, Base64 decoding is applied before the file is saved to disk. If this entry is not set, or is set to `false`, no decoding is applied before saving.
### Gerrit use case
The Gerrit API for downloading files has the following schema: <https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content>, where {project-name} and {file-id} usually contain '/' in their values, so those characters MUST be replaced by %2F. For example, if project-name is user/repo, branch-id is master, and file-id is dir1/dir2/dashboard,
the url value is <https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content>
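A minimal sketch of such a dashboard entry (the server URL is the placeholder from above):
```yaml
dashboards:
  default:
    gerrit-hosted-dashboard:
      url: https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
      b64content: true
```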
## Sidecar for dashboards
If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
dashboards are deleted/updated.
A recommendation is to use one ConfigMap per dashboard, as removing dashboards from a ConfigMap that holds several of them is currently not properly mirrored in Grafana.
Example dashboard config:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-dashboard
labels:
grafana_dashboard: "1"
data:
k8s-dashboard.json: |-
[...]
```
## Sidecar for datasources
If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the data sources in grafana can be imported. The secrets must be created before `helm install` so
that the datasources init container can list the secrets.
Secrets are recommended over configmaps for this use case because datasources usually contain private
data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
```yaml
apiVersion: v1
kind: Secret
metadata:
name: sample-grafana-datasource
labels:
grafana_datasource: "1"
type: Opaque
stringData:
datasource.yaml: |-
# config file version
apiVersion: 1
# list of datasources that should be deleted from the database
deleteDatasources:
- name: Graphite
orgId: 1
# list of datasources to insert/update depending
# whats available in the database
datasources:
# <string, required> name of the datasource. Required
- name: Graphite
# <string, required> datasource type. Required
type: graphite
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
url: http://localhost:8080
# <string> database password, if used
password:
# <string> database user, if used
user:
# <string> database name, if used
database:
# <bool> enable/disable basic auth
basicAuth:
# <string> basic auth username
basicAuthUser:
# <string> basic auth password
basicAuthPassword:
# <bool> enable/disable with credentials headers
withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault:
# <map> fields that will be converted to json and stored in json_data
jsonData:
graphiteVersion: "1.1"
tlsAuth: true
tlsAuthWithCACert: true
# <string> json object of data that will be encrypted.
secureJsonData:
tlsCACert: "..."
tlsClientCert: "..."
tlsClientKey: "..."
version: 1
# <bool> allow users to edit datasources from the UI.
editable: false
```
## Sidecar for notifiers
If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the notification channels in grafana can be imported. The secrets must be created before
`helm install` so that the notifiers init container can list the secrets.
Secrets are recommended over configmaps for this use case because alert notification channels usually contain
private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example notifier config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels):
```yaml
notifiers:
- name: notification-channel-1
type: slack
uid: notifier1
# either
org_id: 2
# or
org_name: Main Org.
is_default: true
send_reminder: true
frequency: 1h
disable_resolve_message: false
# See `Supported Settings` section for settings supporter for each
# alert notification type.
settings:
recipient: 'XXX'
token: 'xoxb'
uploadImage: true
url: https://slack.com
delete_notifiers:
- name: notification-channel-1
uid: notifier1
org_id: 2
- name: notification-channel-2
# default org_id: 1
```
## How to serve Grafana with a path prefix (/grafana)
In order to serve Grafana with a prefix (e.g., <http://example.com/grafana>), add the following to your values.yaml.
```yaml
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
path: /grafana/?(.*)
hosts:
- k8s.example.dev
grafana.ini:
server:
root_url: http://localhost:3000/grafana # this host can be localhost
```
## How to securely reference secrets in grafana.ini
This example uses Grafana's [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets.
In grafana.ini:
```yaml
grafana.ini:
  auth.generic_oauth:
    enabled: true
    client_id: $__file{/etc/secrets/auth_generic_oauth/client_id}
    client_secret: $__file{/etc/secrets/auth_generic_oauth/client_secret}
```
Existing secret, or created along with helm:
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: auth-generic-oauth-secret
type: Opaque
stringData:
client_id: <value>
client_secret: <value>
```
Include in the `extraSecretMounts` configuration flag:
```yaml
extraSecretMounts:
  - name: auth-generic-oauth-secret-mount
    secretName: auth-generic-oauth-secret
    defaultMode: 0440
    mountPath: /etc/secrets/auth_generic_oauth
    readOnly: true
```
### extraSecretMounts using a Container Storage Interface (CSI) provider
This example uses a CSI driver, e.g. retrieving secrets with the [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure):
```yaml
extraSecretMounts:
  - name: secrets-store-inline
    mountPath: /run/secrets
    readOnly: true
    csi:
      driver: secrets-store.csi.k8s.io
      readOnly: true
      volumeAttributes:
        secretProviderClass: "my-provider"
      nodePublishSecretRef:
        name: akv-creds
```
## Image Renderer Plug-In
This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md)
```yaml
imageRenderer:
enabled: true
```
### Image Renderer NetworkPolicy
By default, the image-renderer pods have a NetworkPolicy which only allows ingress traffic from the created Grafana instance.
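The policy can be tightened in both directions through values, as also exercised by one of the CI test values files later in this diff:
```yaml
imageRenderer:
  enabled: true
  networkPolicy:
    limitIngress: true
    limitEgress: true
```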

View File

@ -0,0 +1 @@
# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.

View File

@ -0,0 +1,53 @@
dashboards:
my-provider:
my-awesome-dashboard:
# An empty but valid dashboard
json: |
{
"__inputs": [],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "6.3.5"
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"links": [],
"panels": [],
"schemaVersion": 19,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": ["5s"]
},
"timezone": "",
"title": "Dummy Dashboard",
"uid": "IdcYQooWk",
"version": 1
}
datasource: Prometheus

View File

@ -0,0 +1,19 @@
dashboards:
my-provider:
my-awesome-dashboard:
gnetId: 10000
revision: 1
datasource: Prometheus
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'my-provider'
orgId: 1
folder: ''
type: file
updateIntervalSeconds: 10
disableDeletion: true
editable: true
options:
path: /var/lib/grafana/dashboards/my-provider

View File

@ -0,0 +1,19 @@
podLabels:
customLabelA: Aaaaa
imageRenderer:
enabled: true
env:
RENDERING_ARGS: --disable-gpu,--window-size=1280x758
RENDERING_MODE: clustered
podLabels:
customLabelB: Bbbbb
networkPolicy:
limitIngress: true
limitEgress: true
resources:
limits:
cpu: 1000m
memory: 1000Mi
requests:
cpu: 500m
memory: 50Mi

View File

@ -0,0 +1,54 @@
1. Get your '{{ .Values.adminUser }}' user password by running:
kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local
{{ if .Values.ingress.enabled }}
If you bind grafana to 80, please update values in values.yaml and reinstall:
```
securityContext:
runAsUser: 0
runAsGroup: 0
fsGroup: 0
command:
- "setcap"
- "'cap_net_bind_service=+ep'"
- "/usr/sbin/grafana-server &&"
- "sh"
- "/run.sh"
```
For details, refer to https://grafana.com/docs/installation/configuration/#http-port; otherwise Grafana will keep crashing.
From outside the cluster, the server URL(s) are:
{{- range .Values.ingress.hosts }}
http://{{ . }}
{{- end }}
{{ else }}
Get the Grafana URL to visit by running these commands in the same shell:
{{ if contains "NodePort" .Values.service.type -}}
export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{ else if contains "LoadBalancer" .Values.service.type -}}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ template "grafana.namespace" . }} -w {{ template "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port -}}
{{ else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app.kubernetes.io/name={{ template "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000
{{- end }}
{{- end }}
3. Login with the password from step 1 and the username: {{ .Values.adminUser }}
{{- if not .Values.persistence.enabled }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Grafana pod is terminated. #####
#################################################################################
{{- end }}

View File

@ -0,0 +1,113 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account
*/}}
{{- define "grafana.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "grafana.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- define "grafana.serviceAccountNameTest" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }}
{{- else -}}
{{ default "default" .Values.serviceAccount.nameTest }}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "grafana.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "grafana.labels" -}}
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "grafana.selectorLabels" -}}
app.kubernetes.io/name: {{ include "grafana.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "grafana.imageRenderer.labels" -}}
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.imageRenderer.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels ImageRenderer
*/}}
{{- define "grafana.imageRenderer.selectorLabels" -}}
app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Return the appropriate apiVersion for rbac.
*/}}
{{- define "rbac.apiVersion" -}}
{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- end -}}
{{- end -}}
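For reference, a small values sketch (names are illustrative) of the overrides consumed by the helpers above:
```yaml
# Illustrative values consumed by grafana.fullname, grafana.namespace
# and grafana.serviceAccountName defined in this file.
fullnameOverride: my-grafana
namespaceOverride: monitoring
serviceAccount:
  create: true
  name: ""  # empty: falls back to the generated fullname
```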

View File

@ -0,0 +1,467 @@
{{- define "grafana.pod" -}}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "grafana.serviceAccountName" . }}
{{- if .Values.securityContext }}
securityContext:
{{ toYaml .Values.securityContext | indent 2 }}
{{- end }}
{{- if .Values.hostAliases }}
hostAliases:
{{ toYaml .Values.hostAliases | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }}
initContainers:
{{- end }}
{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }}
- name: init-chown-data
{{- if .Values.initChownData.image.sha }}
image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}"
{{- else }}
image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }}
securityContext:
runAsNonRoot: false
runAsUser: 0
command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"]
resources:
{{ toYaml .Values.initChownData.resources | indent 6 }}
volumeMounts:
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- end }}
{{- if .Values.dashboards }}
- name: download-dashboards
{{- if .Values.downloadDashboardsImage.sha }}
image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}"
{{- else }}
image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }}
command: ["/bin/sh"]
args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ]
resources:
{{ toYaml .Values.downloadDashboards.resources | indent 6 }}
env:
{{- range $key, $value := .Values.downloadDashboards.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/grafana/download_dashboards.sh"
subPath: download_dashboards.sh
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: {{ template "grafana.name" . }}-sc-datasources
{{- if .Values.sidecar.image.sha }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: LIST
- name: LABEL
value: "{{ .Values.sidecar.datasources.label }}"
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.datasources.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.datasources.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: {{ template "grafana.name" . }}-sc-notifiers
{{- if .Values.sidecar.image.sha }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: LIST
- name: LABEL
value: "{{ .Values.sidecar.notifiers.label }}"
- name: FOLDER
value: "/etc/grafana/provisioning/notifiers"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.notifiers.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.notifiers.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-notifiers-volume
mountPath: "/etc/grafana/provisioning/notifiers"
{{- end}}
{{- if .Values.extraInitContainers }}
{{ toYaml .Values.extraInitContainers | indent 2 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
containers:
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard
{{- if .Values.sidecar.image.sha }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: {{ .Values.sidecar.dashboards.watchMethod }}
- name: LABEL
value: "{{ .Values.sidecar.dashboards.label }}"
- name: FOLDER
value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.dashboards.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.dashboards.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
{{- if .Values.sidecar.dashboards.folderAnnotation }}
- name: FOLDER_ANNOTATION
value: "{{ .Values.sidecar.dashboards.folderAnnotation }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{- end}}
- name: {{ .Chart.Name }}
{{- if .Values.image.sha }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}"
{{- else }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.command }}
command:
{{- range .Values.command }}
- {{ . }}
{{- end }}
{{- end}}
volumeMounts:
- name: config
mountPath: "/etc/grafana/grafana.ini"
subPath: grafana.ini
{{- if .Values.ldap.enabled }}
- name: ldap
mountPath: "/etc/grafana/ldap.toml"
subPath: ldap.toml
{{- end }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath | default "" }}
readOnly: {{ .readOnly }}
{{- end }}
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if .Values.dashboards }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
- name: dashboards-{{ $provider }}
mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
subPath: "{{ $key }}.json"
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{- if .Values.dashboardsConfigMaps }}
{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }}
- name: dashboards-{{ . }}
mountPath: "/var/lib/grafana/dashboards/{{ . }}"
{{- end }}
{{- end }}
{{- if .Values.datasources }}
- name: config
mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml"
subPath: datasources.yaml
{{- end }}
{{- if .Values.notifiers }}
- name: config
mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml"
subPath: notifiers.yaml
{{- end }}
{{- if .Values.dashboardProviders }}
- name: config
mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
subPath: dashboardproviders.yaml
{{- end }}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{ if .Values.sidecar.dashboards.SCProvider }}
- name: sc-dashboard-provider
mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml"
subPath: provider.yaml
{{- end}}
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: sc-notifiers-volume
mountPath: "/etc/grafana/provisioning/notifiers"
{{- end}}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
subPath: {{ .subPath | default "" }}
{{- end }}
{{- range .Values.extraVolumeMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath | default "" }}
readOnly: {{ .readOnly }}
{{- end }}
{{- range .Values.extraEmptyDirMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
{{- end }}
ports:
- name: {{ .Values.service.portName }}
containerPort: {{ .Values.service.port }}
protocol: TCP
- name: {{ .Values.podPortName }}
containerPort: 3000
protocol: TCP
env:
{{- if not .Values.env.GF_SECURITY_ADMIN_USER }}
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }}
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if .Values.plugins }}
- name: GF_INSTALL_PLUGINS
valueFrom:
configMapKeyRef:
name: {{ template "grafana.fullname" . }}
key: plugins
{{- end }}
{{- if .Values.smtp.existingSecret }}
- name: GF_SMTP_USER
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: {{ .Values.smtp.userKey | default "user" }}
- name: GF_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: {{ .Values.smtp.passwordKey | default "password" }}
{{- end }}
{{ if .Values.imageRenderer.enabled }}
- name: GF_RENDERING_SERVER_URL
value: http://{{ template "grafana.fullname" . }}-image-renderer.{{ template "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render
- name: GF_RENDERING_CALLBACK_URL
value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}:{{ .Values.service.port }}/
{{ end }}
{{- range $key, $value := .Values.envValueFrom }}
- name: {{ $key | quote }}
valueFrom:
{{ toYaml $value | indent 10 }}
{{- end }}
{{- range $key, $value := .Values.env }}
- name: "{{ tpl $key $ }}"
value: "{{ tpl (print $value) $ }}"
{{- end }}
{{- if .Values.envFromSecret }}
envFrom:
- secretRef:
name: {{ tpl .Values.envFromSecret . }}
{{- end }}
{{- if .Values.envRenderSecret }}
envFrom:
- secretRef:
name: {{ template "grafana.fullname" . }}-env
{{- end }}
livenessProbe:
{{ toYaml .Values.livenessProbe | indent 6 }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 6 }}
resources:
{{ toYaml .Values.resources | indent 6 }}
{{- with .Values.extraContainers }}
{{ tpl . $ | indent 2 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 2 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 2 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 2 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "grafana.fullname" . }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- if .Values.dashboards }}
{{- range (keys .Values.dashboards | sortAlpha) }}
- name: dashboards-{{ . }}
configMap:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }}
{{- end }}
{{- end }}
{{- if .Values.dashboardsConfigMaps }}
{{ $root := . }}
{{- range $provider, $name := .Values.dashboardsConfigMaps }}
- name: dashboards-{{ $provider }}
configMap:
name: {{ tpl $name $root }}
{{- end }}
{{- end }}
{{- if .Values.ldap.enabled }}
- name: ldap
secret:
{{- if .Values.ldap.existingSecret }}
secretName: {{ .Values.ldap.existingSecret }}
{{- else }}
secretName: {{ template "grafana.fullname" . }}
{{- end }}
items:
- key: ldap-toml
path: ldap.toml
{{- end }}
{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }}
- name: storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }}
{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }}
# nothing
{{- else }}
- name: storage
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
emptyDir: {}
{{- if .Values.sidecar.dashboards.SCProvider }}
- name: sc-dashboard-provider
configMap:
name: {{ template "grafana.fullname" . }}-config-dashboards
{{- end }}
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: sc-notifiers-volume
emptyDir: {}
{{- end -}}
{{- range .Values.extraSecretMounts }}
{{- if .secretName }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
defaultMode: {{ .defaultMode }}
{{- else if .projected }}
- name: {{ .name }}
projected: {{- toYaml .projected | nindent 6 }}
{{- else if .csi }}
- name: {{ .name }}
csi: {{- toYaml .csi | nindent 6 }}
{{- end }}
{{- end }}
{{- range .Values.extraVolumeMounts }}
- name: {{ .name }}
persistentVolumeClaim:
claimName: {{ .existingClaim }}
{{- end }}
{{- range .Values.extraEmptyDirMounts }}
- name: {{ .name }}
emptyDir: {}
{{- end -}}
{{- if .Values.extraContainerVolumes }}
{{ toYaml .Values.extraContainerVolumes | indent 2 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-clusterrole
{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }}
rules:
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
{{- end}}
{{- with .Values.rbac.extraClusterRoleRules }}
{{ toYaml . | indent 0 }}
{{- end}}
{{- else }}
rules: []
{{- end}}
{{- end}}

View File

@ -0,0 +1,24 @@
{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "grafana.fullname" . }}-clusterrolebinding
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}
roleRef:
kind: ClusterRole
{{- if (not .Values.rbac.useExistingRole) }}
name: {{ template "grafana.fullname" . }}-clusterrole
{{- else }}
name: {{ .Values.rbac.useExistingRole }}
{{- end }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,29 @@
{{- if .Values.sidecar.dashboards.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-config-dashboards
namespace: {{ template "grafana.namespace" . }}
data:
provider.yaml: |-
apiVersion: 1
providers:
- name: '{{ .Values.sidecar.dashboards.provider.name }}'
orgId: {{ .Values.sidecar.dashboards.provider.orgid }}
{{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
folder: '{{ .Values.sidecar.dashboards.provider.folder }}'
{{- end}}
type: {{ .Values.sidecar.dashboards.provider.type }}
disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}
allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}
updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }}
options:
foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}
{{- end}}

View File

@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
data:
{{- if .Values.plugins }}
plugins: {{ join "," .Values.plugins }}
{{- end }}
grafana.ini: |
{{- range $key, $value := index .Values "grafana.ini" }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- if .Values.datasources }}
{{ $root := . }}
{{- range $key, $value := .Values.datasources }}
{{ $key }}: |
{{ tpl (toYaml $value | indent 4) $root }}
{{- end -}}
{{- end -}}
{{- if .Values.notifiers }}
{{- range $key, $value := .Values.notifiers }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboards }}
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -skf \
--connect-timeout 60 \
--max-time 60 \
{{- if not $value.b64content }}
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
{{ end }}
{{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \
> "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
{{- end -}}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if .Values.dashboards }}
{{ $files := .Files }}
{{- range $provider, $dashboards := .Values.dashboards }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }}
namespace: {{ template "grafana.namespace" $ }}
labels:
{{- include "grafana.labels" $ | nindent 4 }}
dashboard-provider: {{ $provider }}
{{- if $dashboards }}
data:
{{- $dashboardFound := false }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
{{- $dashboardFound = true }}
{{ print $key | indent 2 }}.json:
{{- if hasKey $value "json" }}
|-
{{ $value.json | indent 6 }}
{{- end }}
{{- if hasKey $value "file" }}
{{ toYaml ( $files.Get $value.file ) | indent 4}}
{{- end }}
{{- end }}
{{- end }}
{{- if not $dashboardFound }}
{}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}

View File

@ -0,0 +1,48 @@
{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.labels }}
{{ toYaml .Values.labels | indent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicas }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- with .Values.deploymentStrategy }}
strategy:
{{ toYaml . | trim | indent 4 }}
{{- end }}
template:
metadata:
labels:
{{- include "grafana.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }}
{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.envRenderSecret }}
checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }}
{{- end }}
{{- with .Values.podAnnotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- include "grafana.pod" . | nindent 6 }}
{{- end }}

Some files were not shown because too many files have changed in this diff.