Bump CRD chart version of cis-benchmark (#1077)

Branch: pull/1090/head
Author: actions · 2021-04-05 19:54:08 +00:00
Parent: 724773afbc · Commit: e53b737604

489 changed files with 110528 additions and 0 deletions


@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/release-name: rancher-cis-benchmark-crd
apiVersion: v1
description: Installs the CRDs for rancher-cis-benchmark.
name: rancher-cis-benchmark-crd
type: application
version: 1.0.400-rc00

@@ -0,0 +1,2 @@
# rancher-cis-benchmark-crd
A Rancher chart that installs the CRDs used by rancher-cis-benchmark.

@@ -0,0 +1,149 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscans.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .status.lastRunScanProfileName
name: ClusterScanProfile
type: string
- JSONPath: .status.summary.total
name: Total
type: string
- JSONPath: .status.summary.pass
name: Pass
type: string
- JSONPath: .status.summary.fail
name: Fail
type: string
- JSONPath: .status.summary.skip
name: Skip
type: string
- JSONPath: .status.summary.warn
name: Warn
type: string
- JSONPath: .status.summary.notApplicable
name: Not Applicable
type: string
- JSONPath: .status.lastRunTimestamp
name: LastRunTimestamp
type: string
- JSONPath: .spec.scheduledScanConfig.cronSchedule
name: CronSchedule
type: string
group: cis.cattle.io
names:
kind: ClusterScan
plural: clusterscans
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
scanProfileName:
nullable: true
type: string
scheduledScanConfig:
nullable: true
properties:
cronSchedule:
nullable: true
type: string
retentionCount:
type: integer
scanAlertRule:
nullable: true
properties:
alertOnComplete:
type: boolean
alertOnFailure:
type: boolean
type: object
type: object
scoreWarning:
enum:
- pass
- fail
nullable: true
type: string
type: object
status:
properties:
NextScanAt:
nullable: true
type: string
ScanAlertingRuleName:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
display:
nullable: true
properties:
error:
type: boolean
message:
nullable: true
type: string
state:
nullable: true
type: string
transitioning:
type: boolean
type: object
lastRunScanProfileName:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
observedGeneration:
type: integer
summary:
nullable: true
properties:
fail:
type: integer
notApplicable:
type: integer
pass:
type: integer
skip:
type: integer
total:
type: integer
warn:
type: integer
type: object
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,55 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanbenchmarks.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.clusterProvider
name: ClusterProvider
type: string
- JSONPath: .spec.minKubernetesVersion
name: MinKubernetesVersion
type: string
- JSONPath: .spec.maxKubernetesVersion
name: MaxKubernetesVersion
type: string
- JSONPath: .spec.customBenchmarkConfigMapName
name: customBenchmarkConfigMapName
type: string
- JSONPath: .spec.customBenchmarkConfigMapNamespace
name: customBenchmarkConfigMapNamespace
type: string
group: cis.cattle.io
names:
kind: ClusterScanBenchmark
plural: clusterscanbenchmarks
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
clusterProvider:
nullable: true
type: string
customBenchmarkConfigMapName:
nullable: true
type: string
customBenchmarkConfigMapNamespace:
nullable: true
type: string
maxKubernetesVersion:
nullable: true
type: string
minKubernetesVersion:
nullable: true
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,37 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanprofiles.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
group: cis.cattle.io
names:
kind: ClusterScanProfile
plural: clusterscanprofiles
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
skipTests:
items:
nullable: true
type: string
nullable: true
type: array
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,40 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanreports.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.lastRunTimestamp
name: LastRunTimestamp
type: string
- JSONPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
group: cis.cattle.io
names:
kind: ClusterScanReport
plural: clusterscanreports
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
reportJSON:
nullable: true
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-logging-system
catalog.cattle.io/release-name: rancher-logging-crd
apiVersion: v1
description: Installs the CRDs for rancher-logging.
name: rancher-logging-crd
type: application
version: 3.9.002-rc03

@@ -0,0 +1,2 @@
# rancher-logging-crd
A Rancher chart that installs the CRDs used by rancher-logging.

@@ -0,0 +1,765 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
creationTimestamp: null
name: clusterflows.logging.banzaicloud.io
spec:
additionalPrinterColumns:
- JSONPath: .status.active
description: Is the flow active?
name: Active
type: boolean
- JSONPath: .status.problemsCount
description: Number of problems
name: Problems
type: integer
group: logging.banzaicloud.io
names:
categories:
- logging-all
kind: ClusterFlow
listKind: ClusterFlowList
plural: clusterflows
singular: clusterflow
preserveUnknownFields: false
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata:
type: object
spec:
properties:
filters:
items:
properties:
concat:
properties:
continuous_line_regexp:
type: string
flush_interval:
type: integer
keep_partial_key:
type: boolean
keep_partial_metadata:
type: string
key:
type: string
multiline_end_regexp:
type: string
multiline_start_regexp:
type: string
n_lines:
type: integer
partial_key:
type: string
partial_value:
type: string
separator:
type: string
stream_identity_key:
type: string
timeout_label:
type: string
use_first_timestamp:
type: boolean
use_partial_metadata:
type: string
type: object
dedot:
properties:
de_dot_nested:
type: boolean
de_dot_separator:
type: string
type: object
detectExceptions:
properties:
languages:
items:
type: string
type: array
max_bytes:
type: integer
max_lines:
type: integer
message:
type: string
multiline_flush_interval:
type: string
remove_tag_prefix:
type: string
stream:
type: string
type: object
enhanceK8s:
properties:
api_groups:
items:
type: string
type: array
bearer_token_file:
type: string
ca_file:
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
type: object
cache_refresh:
type: integer
cache_refresh_variation:
type: integer
cache_size:
type: integer
cache_ttl:
type: integer
client_cert:
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
type: object
client_key:
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
type: object
core_api_versions:
items:
type: string
type: array
data_type:
type: string
in_namespace_path:
items:
type: string
type: array
in_pod_path:
items:
type: string
type: array
kubernetes_url:
type: string
secret_dir:
type: string
ssl_partial_chain:
type: boolean
verify_ssl:
type: boolean
type: object
geoip:
properties:
backend_library:
type: string
geoip_2_database:
type: string
geoip_database:
type: string
geoip_lookup_keys:
type: string
records:
items:
additionalProperties:
type: string
type: object
type: array
skip_adding_null_record:
type: boolean
type: object
grep:
properties:
and:
items:
properties:
exclude:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
regexp:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
type: object
type: array
exclude:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
or:
items:
properties:
exclude:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
regexp:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
type: object
type: array
regexp:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
type: object
parser:
properties:
emit_invalid_record_to_error:
type: boolean
hash_value_field:
type: string
inject_key_prefix:
type: string
key_name:
type: string
parse:
properties:
delimiter:
type: string
delimiter_pattern:
type: string
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
format_firstline:
type: string
keep_time_key:
type: boolean
label_delimiter:
type: string
local_time:
type: boolean
multiline:
items:
type: string
type: array
null_empty_string:
type: boolean
null_value_pattern:
type: string
patterns:
items:
properties:
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
keep_time_key:
type: boolean
local_time:
type: boolean
null_empty_string:
type: boolean
null_value_pattern:
type: string
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
type: array
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
parsers:
items:
properties:
delimiter:
type: string
delimiter_pattern:
type: string
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
format_firstline:
type: string
keep_time_key:
type: boolean
label_delimiter:
type: string
local_time:
type: boolean
multiline:
items:
type: string
type: array
null_empty_string:
type: boolean
null_value_pattern:
type: string
patterns:
items:
properties:
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
keep_time_key:
type: boolean
local_time:
type: boolean
null_empty_string:
type: boolean
null_value_pattern:
type: string
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
type: array
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
type: array
remove_key_name_field:
type: boolean
replace_invalid_sequence:
type: boolean
reserve_data:
type: boolean
reserve_time:
type: boolean
type: object
prometheus:
properties:
labels:
additionalProperties:
type: string
type: object
metrics:
items:
properties:
buckets:
type: string
desc:
type: string
key:
type: string
labels:
additionalProperties:
type: string
type: object
name:
type: string
type:
type: string
required:
- desc
- name
- type
type: object
type: array
type: object
record_modifier:
properties:
char_encoding:
type: string
prepare_value:
type: string
records:
items:
additionalProperties:
type: string
type: object
type: array
remove_keys:
type: string
replaces:
items:
properties:
expression:
type: string
key:
type: string
replace:
type: string
required:
- expression
- key
- replace
type: object
type: array
whitelist_keys:
type: string
type: object
record_transformer:
properties:
auto_typecast:
type: boolean
enable_ruby:
type: boolean
keep_keys:
type: string
records:
items:
additionalProperties:
type: string
type: object
type: array
remove_keys:
type: string
renew_record:
type: boolean
renew_time_key:
type: string
type: object
stdout:
properties:
output_type:
type: string
type: object
sumologic:
properties:
collector_key_name:
type: string
collector_value:
type: string
exclude_container_regex:
type: string
exclude_facility_regex:
type: string
exclude_host_regex:
type: string
exclude_namespace_regex:
type: string
exclude_pod_regex:
type: string
exclude_priority_regex:
type: string
exclude_unit_regex:
type: string
log_format:
type: string
source_category:
type: string
source_category_key_name:
type: string
source_category_prefix:
type: string
source_category_replace_dash:
type: string
source_host:
type: string
source_host_key_name:
type: string
source_name:
type: string
source_name_key_name:
type: string
tracing_annotation_prefix:
type: string
tracing_container_name:
type: string
tracing_format:
type: boolean
tracing_host:
type: string
tracing_label_prefix:
type: string
tracing_namespace:
type: string
tracing_pod:
type: string
tracing_pod_id:
type: string
type: object
tag_normaliser:
properties:
format:
type: string
type: object
throttle:
properties:
group_bucket_limit:
type: integer
group_bucket_period_s:
type: integer
group_drop_logs:
type: boolean
group_key:
type: string
group_reset_rate_s:
type: integer
group_warning_delay_s:
type: integer
type: object
type: object
type: array
globalOutputRefs:
items:
type: string
type: array
loggingRef:
type: string
match:
items:
properties:
exclude:
properties:
container_names:
items:
type: string
type: array
hosts:
items:
type: string
type: array
labels:
additionalProperties:
type: string
type: object
namespaces:
items:
type: string
type: array
type: object
select:
properties:
container_names:
items:
type: string
type: array
hosts:
items:
type: string
type: array
labels:
additionalProperties:
type: string
type: object
namespaces:
items:
type: string
type: array
type: object
type: object
type: array
outputRefs:
items:
type: string
type: array
selectors:
additionalProperties:
type: string
type: object
type: object
status:
properties:
active:
type: boolean
problems:
items:
type: string
type: array
problemsCount:
type: integer
type: object
type: object
version: v1beta1
versions:
- name: v1beta1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@@ -0,0 +1,761 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
creationTimestamp: null
name: flows.logging.banzaicloud.io
spec:
additionalPrinterColumns:
- JSONPath: .status.active
description: Is the flow active?
name: Active
type: boolean
- JSONPath: .status.problemsCount
description: Number of problems
name: Problems
type: integer
group: logging.banzaicloud.io
names:
categories:
- logging-all
kind: Flow
listKind: FlowList
plural: flows
singular: flow
preserveUnknownFields: false
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata:
type: object
spec:
properties:
filters:
items:
properties:
concat:
properties:
continuous_line_regexp:
type: string
flush_interval:
type: integer
keep_partial_key:
type: boolean
keep_partial_metadata:
type: string
key:
type: string
multiline_end_regexp:
type: string
multiline_start_regexp:
type: string
n_lines:
type: integer
partial_key:
type: string
partial_value:
type: string
separator:
type: string
stream_identity_key:
type: string
timeout_label:
type: string
use_first_timestamp:
type: boolean
use_partial_metadata:
type: string
type: object
dedot:
properties:
de_dot_nested:
type: boolean
de_dot_separator:
type: string
type: object
detectExceptions:
properties:
languages:
items:
type: string
type: array
max_bytes:
type: integer
max_lines:
type: integer
message:
type: string
multiline_flush_interval:
type: string
remove_tag_prefix:
type: string
stream:
type: string
type: object
enhanceK8s:
properties:
api_groups:
items:
type: string
type: array
bearer_token_file:
type: string
ca_file:
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
type: object
cache_refresh:
type: integer
cache_refresh_variation:
type: integer
cache_size:
type: integer
cache_ttl:
type: integer
client_cert:
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
type: object
client_key:
properties:
mountFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
value:
type: string
valueFrom:
properties:
secretKeyRef:
properties:
key:
type: string
name:
type: string
optional:
type: boolean
required:
- key
type: object
type: object
type: object
core_api_versions:
items:
type: string
type: array
data_type:
type: string
in_namespace_path:
items:
type: string
type: array
in_pod_path:
items:
type: string
type: array
kubernetes_url:
type: string
secret_dir:
type: string
ssl_partial_chain:
type: boolean
verify_ssl:
type: boolean
type: object
geoip:
properties:
backend_library:
type: string
geoip_2_database:
type: string
geoip_database:
type: string
geoip_lookup_keys:
type: string
records:
items:
additionalProperties:
type: string
type: object
type: array
skip_adding_null_record:
type: boolean
type: object
grep:
properties:
and:
items:
properties:
exclude:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
regexp:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
type: object
type: array
exclude:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
or:
items:
properties:
exclude:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
regexp:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
type: object
type: array
regexp:
items:
properties:
key:
type: string
pattern:
type: string
required:
- key
- pattern
type: object
type: array
type: object
parser:
properties:
emit_invalid_record_to_error:
type: boolean
hash_value_field:
type: string
inject_key_prefix:
type: string
key_name:
type: string
parse:
properties:
delimiter:
type: string
delimiter_pattern:
type: string
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
format_firstline:
type: string
keep_time_key:
type: boolean
label_delimiter:
type: string
local_time:
type: boolean
multiline:
items:
type: string
type: array
null_empty_string:
type: boolean
null_value_pattern:
type: string
patterns:
items:
properties:
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
keep_time_key:
type: boolean
local_time:
type: boolean
null_empty_string:
type: boolean
null_value_pattern:
type: string
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
type: array
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
parsers:
items:
properties:
delimiter:
type: string
delimiter_pattern:
type: string
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
format_firstline:
type: string
keep_time_key:
type: boolean
label_delimiter:
type: string
local_time:
type: boolean
multiline:
items:
type: string
type: array
null_empty_string:
type: boolean
null_value_pattern:
type: string
patterns:
items:
properties:
estimate_current_event:
type: boolean
expression:
type: string
format:
type: string
keep_time_key:
type: boolean
local_time:
type: boolean
null_empty_string:
type: boolean
null_value_pattern:
type: string
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
type: array
time_format:
type: string
time_key:
type: string
time_type:
type: string
timezone:
type: string
type:
type: string
types:
type: string
utc:
type: boolean
type: object
type: array
remove_key_name_field:
type: boolean
replace_invalid_sequence:
type: boolean
reserve_data:
type: boolean
reserve_time:
type: boolean
type: object
prometheus:
properties:
labels:
additionalProperties:
type: string
type: object
metrics:
items:
properties:
buckets:
type: string
desc:
type: string
key:
type: string
labels:
additionalProperties:
type: string
type: object
name:
type: string
type:
type: string
required:
- desc
- name
- type
type: object
type: array
type: object
record_modifier:
properties:
char_encoding:
type: string
prepare_value:
type: string
records:
items:
additionalProperties:
type: string
type: object
type: array
remove_keys:
type: string
replaces:
items:
properties:
expression:
type: string
key:
type: string
replace:
type: string
required:
- expression
- key
- replace
type: object
type: array
whitelist_keys:
type: string
type: object
record_transformer:
properties:
auto_typecast:
type: boolean
enable_ruby:
type: boolean
keep_keys:
type: string
records:
items:
additionalProperties:
type: string
type: object
type: array
remove_keys:
type: string
renew_record:
type: boolean
renew_time_key:
type: string
type: object
stdout:
properties:
output_type:
type: string
type: object
sumologic:
properties:
collector_key_name:
type: string
collector_value:
type: string
exclude_container_regex:
type: string
exclude_facility_regex:
type: string
exclude_host_regex:
type: string
exclude_namespace_regex:
type: string
exclude_pod_regex:
type: string
exclude_priority_regex:
type: string
exclude_unit_regex:
type: string
log_format:
type: string
source_category:
type: string
source_category_key_name:
type: string
source_category_prefix:
type: string
source_category_replace_dash:
type: string
source_host:
type: string
source_host_key_name:
type: string
source_name:
type: string
source_name_key_name:
type: string
tracing_annotation_prefix:
type: string
tracing_container_name:
type: string
tracing_format:
type: boolean
tracing_host:
type: string
tracing_label_prefix:
type: string
tracing_namespace:
type: string
tracing_pod:
type: string
tracing_pod_id:
type: string
type: object
tag_normaliser:
properties:
format:
type: string
type: object
throttle:
properties:
group_bucket_limit:
type: integer
group_bucket_period_s:
type: integer
group_drop_logs:
type: boolean
group_key:
type: string
group_reset_rate_s:
type: integer
group_warning_delay_s:
type: integer
type: object
type: object
type: array
globalOutputRefs:
items:
type: string
type: array
localOutputRefs:
items:
type: string
type: array
loggingRef:
type: string
match:
items:
properties:
exclude:
properties:
container_names:
items:
type: string
type: array
hosts:
items:
type: string
type: array
labels:
additionalProperties:
type: string
type: object
type: object
select:
properties:
container_names:
items:
type: string
type: array
hosts:
items:
type: string
type: array
labels:
additionalProperties:
type: string
type: object
type: object
type: object
type: array
outputRefs:
items:
type: string
type: array
selectors:
additionalProperties:
type: string
type: object
type: object
status:
properties:
active:
type: boolean
problems:
items:
type: string
type: array
problemsCount:
type: integer
type: object
type: object
version: v1beta1
versions:
- name: v1beta1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,20 @@
annotations:
catalog.cattle.io/auto-install: rancher-logging-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Logging
catalog.cattle.io/namespace: cattle-logging-system
catalog.cattle.io/os: linux
catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1
catalog.cattle.io/release-name: rancher-logging
catalog.cattle.io/ui-component: logging
apiVersion: v1
appVersion: 3.9.0
description: Collects and filters logs using highly configurable CRDs. Powered by Banzai Cloud Logging Operator.
icon: https://charts.rancher.io/assets/logos/logging.svg
keywords:
- logging
- monitoring
- security
name: rancher-logging
version: 3.9.002-rc03

@@ -0,0 +1,130 @@
# Logging operator Chart
The [Logging operator](https://github.com/banzaicloud/logging-operator) manages the centralized logging components (Fluentd and Fluent Bit instances) on the cluster.
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
$ helm install banzaicloud-stable/logging-operator
```
## Introduction
This chart bootstraps a [Logging Operator](https://github.com/banzaicloud/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.8+ with Beta APIs enabled
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm install --name my-release banzaicloud-stable/logging-operator
```
### CRDs
With Helm v3, set `createCustomResource=false` so the chart does not try to create the CRDs twice: once from the `crds` folder and once from the templates.
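A minimal sketch of a Helm v3 install that avoids the duplicate CRD creation (repository and flag as above):
```bash
# Helm v3 installs CRDs from the crds/ folder automatically,
# so disable template-based CRD creation to avoid a conflict.
$ helm install my-release banzaicloud-stable/logging-operator --set createCustomResource=false
```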
The command deploys **Logging operator** on the Kubernetes cluster with the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```bash
$ helm delete my-release
```
The command removes all Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the logging-operator chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
| `image.repository` | Container image repository | `ghcr.io/banzaicloud/logging-operator` |
| `image.tag` | Container image tag | `3.9.0` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `nameOverride` | Override name of app | `` |
| `fullnameOverride` | Override full name of app | `` |
| `namespaceOverride` | Override namespace of app | `` |
| `watchNamespace` | Namespace to watch for LoggingOperator CRD | `` |
| `rbac.enabled` | Create rbac service account and roles | `true` |
| `rbac.psp.enabled` | Must be used with `rbac.enabled` true. If true, creates & uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. | `false` |
| `priorityClassName` | Operator priorityClassName | `{}` |
| `affinity` | Node Affinity | `{}` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `tolerations` | Node Tolerations | `[]` |
| `nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` |
| `annotations` | Define annotations for logging-operator pods | `{}` |
| `podSecurityContext` | Pod SecurityContext for Logging operator. [More info](https://kubernetes.io/docs/concepts/policy/security-context/) | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` |
| `securityContext` | Container SecurityContext for Logging operator. [More info](https://kubernetes.io/docs/concepts/policy/security-context/) | `{"allowPrivilegeEscalation": false, "readOnlyRootFilesystem": true}` |
| `createCustomResource` | Create CRDs. | `true` |
| `monitoring.serviceMonitor.enabled` | Create Prometheus Operator servicemonitor. | `false` |
| `global.seLinux.enabled` | Add seLinuxOptions to Logging resources, requires the [rke2-selinux RPM](https://github.com/rancher/rke2-selinux/releases) | `false` |
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example:
```bash
$ helm install --name my-release -f values.yaml banzaicloud-stable/logging-operator
```
> **Tip**: You can use the default [values.yaml](values.yaml)
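For instance, a minimal `values.yaml` overriding a few of the parameters listed above (the values here are purely illustrative):
```yaml
# Illustrative overrides; keys match the parameter table above
image:
  repository: ghcr.io/banzaicloud/logging-operator
  tag: "3.9.0"
rbac:
  enabled: true
monitoring:
  serviceMonitor:
    enabled: true
```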
## Installing Fluentd and Fluent-bit via logging
The chart above does **not** install a `Logging` resource to deploy Fluentd and Fluent Bit on the cluster. To install them, use the [Logging Operator Logging](https://github.com/banzaicloud/logging-operator/tree/master/charts/logging-operator-logging) chart.
## tl;dr:
```bash
$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
$ helm repo update
$ helm install banzaicloud-stable/logging-operator-logging
```
## Configuration
The following table lists the configurable parameters of the logging-operator-logging chart and their default values.
| Parameter | Description | Default |
| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ |
| `tls.enabled` | Enable TLS communication between components | true |
| `tls.fluentdSecretName` | Secret name containing the TLS certs | Overrides automatic Helm certificate generation. |
| `tls.fluentbitSecretName` | Secret name containing the TLS certs | Overrides automatic Helm certificate generation. |
| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] |
| `fluentbit.enabled` | Install fluent-bit | true |
| `fluentbit.namespace` | Namespace in which to install Fluent Bit | same as operator namespace |
| `fluentbit.image.tag` | Fluentbit container image tag | `1.6.10` |
| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` |
| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` |
| `fluentd.enabled` | Install fluentd | true |
| `fluentd.image.tag` | Fluentd container image tag | `v1.11.5-alpine-9` |
| `fluentd.image.repository` | Fluentd container image repository | `ghcr.io/banzaicloud/fluentd` |
| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` |
| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` |
| `fluentd.volumeModImage.repository` | Fluentd volumeModImage container image repository | `busybox` |
| `fluentd.volumeModImage.pullPolicy` | Fluentd volumeModImage container pull policy | `IfNotPresent` |
| `fluentd.configReloaderImage.tag` | Fluentd configReloaderImage container image tag | `v0.2.2` |
| `fluentd.configReloaderImage.repository` | Fluentd configReloaderImage container image repository | `jimmidyson/configmap-reload` |
| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` |
| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` |
| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` |
| `fluentd.fluentdPvcSpec.storageClassName` | Fluentd persistence volume storageclass | `""` |
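As a sketch, a `values.yaml` overriding a few of these parameters (keys taken from the table above; values are illustrative):
```yaml
# Illustrative overrides for the logging-operator-logging chart
tls:
  enabled: true
fluentbit:
  enabled: true
fluentd:
  enabled: true
  fluentdPvcSpec:
    resources:
      requests:
        storage: 30Gi   # grow the buffer volume beyond the default
```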

@@ -0,0 +1,22 @@
# Rancher Logging
This chart is based on the upstream [Banzai Logging Operator](https://banzaicloud.com/docs/one-eye/logging-operator/) chart. The chart deploys a logging operator and CRDs, which allow users to configure complex logging pipelines with a few simple custom resources. There are two levels of logging, which allow you to collect all logs in a cluster or from a single namespace.
For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/logging/v2.5/).
## Namespace-level logging
To collect logs from a single namespace, users create flows and these flows are connected to outputs or cluster outputs.
## Cluster-level logging
To collect logs from an entire cluster, users create cluster flows and cluster outputs.
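For example, a minimal sketch of a cluster-level pipeline; the `ClusterOutput` named `all-logs-output` is an assumption, and the field names follow the `ClusterFlow` CRD schema shipped with this chart:
```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: all-logs
  namespace: cattle-logging-system  # same namespace as the logging operator
spec:
  match:
    - select: {}                    # select logs from every namespace
  globalOutputRefs:
    - all-logs-output               # name of an existing ClusterOutput (assumed)
```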
## CRDs
- [Cluster Flow](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/clusterflow_types/) - A cluster flow is a CRD (`ClusterFlow`) that defines what logs to collect from the entire cluster. The cluster flow must be deployed in the same namespace as the logging operator.
- [Cluster Output](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/clusteroutput_types/) - A cluster output is a CRD (`ClusterOutput`) that defines how to connect to logging providers so they can start collecting logs. The cluster output must be deployed in the same namespace as the logging operator. The convenience of using a cluster output is that either a cluster flow or flow can send logs to those providers without needing to define specific outputs in each namespace for each flow.
- [Flow](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/flow_types/) - A flow is a CRD (`Flow`) that defines what logs to collect from the namespace that it is deployed in.
- [Output](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/output_types/) - An output is a CRD (`Output`) that defines how to connect to logging providers so logs can be sent to the provider. A minimal `Flow`/`Output` pairing is sketched after this list.
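A minimal sketch of namespace-level collection, assuming an `Output` named `my-output` already exists in the same namespace (fields follow the `Flow` CRD schema above):
```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: app-flow
  namespace: my-app                 # collects logs only from this namespace
spec:
  match:
    - select:
        labels:
          app: my-app               # only pods carrying this label
  filters:
    - tag_normaliser: {}            # normalise the log tag
  localOutputRefs:
    - my-output                     # Output in the same namespace (assumed)
```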
For more information on how to configure the Helm chart, refer to the Helm README.

@@ -0,0 +1,66 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "logging-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "logging-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Provides the namespace the chart will be installed in using the builtin .Release.Namespace,
or, if provided, a manually overwritten namespace value.
*/}}
{{- define "logging-operator.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{ .Values.namespaceOverride -}}
{{- else -}}
{{ .Release.Namespace }}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "logging-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "logging-operator.labels" -}}
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
helm.sh/chart: {{ include "logging-operator.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -0,0 +1,167 @@
{{- if .Values.rbac.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: {{ template "logging-operator.fullname" . }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
- pods
- serviceaccounts
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- get
- list
- watch
- apiGroups:
- apps
resources:
- daemonsets
- replicasets
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
- policy
resources:
- podsecuritypolicies
verbs:
- create
- delete
- get
- list
- patch
- update
- use
- watch
- apiGroups:
- logging.banzaicloud.io
resources:
- clusterflows
- clusteroutputs
- flows
- loggings
- outputs
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- logging.banzaicloud.io
resources:
- clusterflows/status
- clusteroutputs/status
- flows/status
- loggings/status
- outputs/status
verbs:
- get
- patch
- update
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- rolebindings
- roles
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
{{- end }}

@@ -0,0 +1,18 @@
{{- if .Values.rbac.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "logging-operator.fullname" . }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
subjects:
- kind: ServiceAccount
name: {{ template "logging-operator.fullname" . }}
namespace: {{ include "logging-operator.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "logging-operator.fullname" . }}
{{- end }}

@@ -0,0 +1,6 @@
{{- if .Values.createCustomResource -}}
{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }}
{{ $.Files.Get $path }}
---
{{- end }}
{{- end }}

@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "logging-operator.fullname" . }}
namespace: {{ include "logging-operator.namespace" . }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
ports:
- name: http
containerPort: {{ .Values.http.port }}
{{- if .Values.securityContext }}
securityContext: {{ toYaml .Values.securityContext | nindent 12 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.rbac.enabled }}
serviceAccountName: {{ include "logging-operator.fullname" . }}
{{- end }}

@@ -0,0 +1,58 @@
{{- if .Values.additionalLoggingSources.aks.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-aks
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "aks"
Path: "/var/log/azure/kubelet-status.log"
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentbit.resources }}
resources: {{- toYaml . | nindent 6 }}
{{- end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.resources }}
resources: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.livenessProbe }}
livenessProbe: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

@@ -0,0 +1,59 @@
{{- if .Values.additionalLoggingSources.eks.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-eks
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "eks"
Path: "/var/log/messages"
Parser: "syslog"
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentbit.resources }}
resources: {{- toYaml . | nindent 6 }}
{{- end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.resources }}
resources: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.livenessProbe }}
livenessProbe: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

@@ -0,0 +1,58 @@
{{- if .Values.additionalLoggingSources.gke.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-gke
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "gke"
Path: "/var/log/kube-proxy.log"
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentbit.resources }}
resources: {{- toYaml . | nindent 6 }}
{{- end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.resources }}
resources: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.livenessProbe }}
livenessProbe: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

@@ -0,0 +1,68 @@
{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "openrc")}}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-k3s
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "k3s"
Path: "/var/log/k3s.log"
extraVolumeMounts:
- source: "/var/log/"
destination: "/var/log"
readOnly: true
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentbit.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.livenessProbe }}
livenessProbe: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

@@ -0,0 +1,68 @@
{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "systemd")}}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-k3s
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "k3s"
Path: "/var/log/syslog"
extraVolumeMounts:
- source: "/var/log/"
destination: "/var/log"
readOnly: true
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentbit.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.livenessProbe }}
livenessProbe: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

@@ -0,0 +1,29 @@
{{- if .Values.additionalLoggingSources.rke.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-rke
labels:
{{ include "logging-operator.labels" . | indent 4 }}
data:
fluent-bit.conf: |
[SERVICE]
Log_Level {{ .Values.additionalLoggingSources.rke.fluentbit.log_level }}
Parsers_File parsers.conf
[INPUT]
Tag rke
Name tail
Path_Key filename
Parser docker
DB /tail-db/tail-containers-state.db
Mem_Buf_Limit {{ .Values.additionalLoggingSources.rke.fluentbit.mem_buffer_limit }}
Path /var/lib/rancher/rke/log/*.log
[OUTPUT]
Name forward
Match *
Host {{ .Release.Name }}-fluentd.{{ .Release.Namespace }}.svc
Port 24240
Retry_Limit False
{{- end }}

@@ -0,0 +1,124 @@
{{- if .Values.additionalLoggingSources.rke.enabled }}
{{- $containers := printf "%s/containers/" (default "/var/lib/docker" .Values.global.dockerRootDirectory) }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
selector:
matchLabels:
name: {{ .Release.Name }}-rke-aggregator
template:
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
labels:
name: {{ .Release.Name }}-rke-aggregator
spec:
containers:
- name: fluentbit
image: "{{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}:{{ .Values.images.fluentbit.tag }}"
volumeMounts:
- mountPath: /var/lib/rancher/rke/log/
name: indir
- mountPath: {{ $containers }}
name: containers
- mountPath: /tail-db
name: positiondb
- mountPath: /fluent-bit/etc/fluent-bit.conf
name: config
subPath: fluent-bit.conf
{{- if .Values.global.seLinux.enabled }}
securityContext:
seLinuxOptions:
type: rke_logreader_t
{{- end }}
volumes:
- name: indir
hostPath:
path: /var/lib/rancher/rke/log/
type: DirectoryOrCreate
- name: containers
hostPath:
path: {{ $containers }}
type: DirectoryOrCreate
- name: positiondb
emptyDir: {}
- name: config
configMap:
name: "{{ .Release.Name }}-rke"
serviceAccountName: "{{ .Release.Name }}-rke-aggregator"
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
{{- if .Values.global.psp.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
rules:
- apiGroups:
- policy
resourceNames:
- "{{ .Release.Name }}-rke-aggregator"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ .Release.Name }}-rke-aggregator"
subjects:
- kind: ServiceAccount
name: "{{ .Release.Name }}-rke-aggregator"
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: "{{ .Release.Name }}-rke-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
allowPrivilegeEscalation: false
allowedHostPaths:
- pathPrefix: {{ $containers }}
readOnly: false
- pathPrefix: /var/lib/rancher/rke/log/
readOnly: false
- pathPrefix: /var/lib/rancher/logging/
readOnly: false
fsGroup:
rule: RunAsAny
readOnlyRootFilesystem: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- secret
- hostPath
{{- end }}
{{- end }}

@@ -0,0 +1,21 @@
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-rke2
labels:
{{ include "logging-operator.labels" . | indent 4 }}
data:
fluent-bit.conf: |
[INPUT]
Name systemd
Tag rke2
Systemd_Filter _SYSTEMD_UNIT=rke2.service
[OUTPUT]
Name forward
Match *
Host {{ .Release.Name }}-fluentd.{{ .Release.Namespace }}.svc
Port 24240
Retry_Limit False
{{- end }}

@@ -0,0 +1,104 @@
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
selector:
matchLabels:
name: {{ .Release.Name }}-rke2-journald-aggregator
template:
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
labels:
name: {{ .Release.Name }}-rke2-journald-aggregator
spec:
containers:
- name: fluentbit
image: "{{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}:{{ .Values.images.fluentbit.tag }}"
{{- if .Values.global.seLinux.enabled }}
securityContext:
seLinuxOptions:
type: rke_logreader_t
{{- end }}
volumeMounts:
- mountPath: /fluent-bit/etc/
name: config
- mountPath: /run/log/journal
name: journal
readOnly: true
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: "{{ .Release.Name }}-rke2-journald-aggregator"
volumes:
- name: config
configMap:
name: "{{ .Release.Name }}-rke2"
- name: journal
hostPath:
path: /run/log/journal
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
{{- if .Values.global.psp.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
rules:
- apiGroups:
- policy
resourceNames:
- "{{ .Release.Name }}-rke2-journald-aggregator"
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ .Release.Name }}-rke2-journald-aggregator"
subjects:
- kind: ServiceAccount
name: "{{ .Release.Name }}-rke2-journald-aggregator"
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
allowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
readOnlyRootFilesystem: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- secret
- hostPath
{{- end }}
{{- end }}

@@ -0,0 +1,73 @@
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-rke2-containers
namespace: {{ .Release.Namespace }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "rke2"
Path: "/var/log/containers/*rke*.log"
extraVolumeMounts:
- source: "/var/log/containers/"
destination: "/var/log/containers/"
readOnly: true
{{- if or .Values.global.psp.enabled .Values.global.seLinux.enabled }}
security:
{{- end }}
    {{- if .Values.global.psp.enabled }}
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- if .Values.global.seLinux.enabled }}
securityContext:
seLinuxOptions:
type: rke_logreader_t
{{- end }}
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentbit.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.livenessProbe }}
livenessProbe: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

@@ -0,0 +1,74 @@
{{- $containers := printf "%s/containers/" (default "/var/lib/docker" .Values.global.dockerRootDirectory) }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
{{- if or .Values.global.psp.enabled .Values.global.seLinux.enabled }}
security:
{{- end }}
{{- if .Values.global.psp.enabled }}
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- if .Values.global.seLinux.enabled }}
securityContext:
seLinuxOptions:
type: rke_logreader_t
{{- end }}
{{- if .Values.global.dockerRootDirectory }}
mountPath: {{ $containers }}
extraVolumeMounts:
- source: {{ $containers }}
destination: {{ $containers }}
readOnly: true
{{- end }}
{{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }}
{{- with $total_tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentbit.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- if .Values.global.psp.enabled }}
security:
podSecurityPolicyCreate: true
roleBasedAccessControlCreate: true
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.fluentd.livenessProbe }}
livenessProbe: {{- toYaml . | nindent 6 }}
{{- end }}

@@ -0,0 +1,33 @@
{{ if and .Values.rbac.enabled .Values.rbac.psp.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.logging-operator
namespace: {{ include "logging-operator.namespace" . }}
annotations:
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
readOnlyRootFilesystem: true
privileged: false
allowPrivilegeEscalation: false
runAsUser:
rule: MustRunAsNonRoot
fsGroup:
rule: MustRunAs
ranges:
- min: 1
max: 65535
supplementalGroups:
rule: MustRunAs
ranges:
- min: 1
max: 65535
seLinux:
rule: RunAsAny
volumes:
- secret
- configMap
{{ end }}

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "logging-operator.fullname" . }}
namespace: {{ include "logging-operator.namespace" . }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
type: ClusterIP
{{- with .Values.http.service.clusterIP }}
clusterIP: {{ . }}
{{- end }}
ports:
- port: {{ .Values.http.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "logging-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

@@ -0,0 +1,30 @@
{{ if .Values.monitoring.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "logging-operator.fullname" . }}
namespace: {{ include "logging-operator.namespace" . }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
{{- with .Values.monitoring.serviceMonitor.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{ include "logging-operator.labels" . | indent 6 }}
endpoints:
- port: http
path: /metrics
{{- with .Values.monitoring.serviceMonitor.metricsRelabelings }}
metricRelabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.monitoring.serviceMonitor.relabelings }}
relabelings:
      {{- toYaml . | nindent 6 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ include "logging-operator.namespace" . }}
{{- end }}

@@ -0,0 +1,10 @@
{{- if .Values.rbac.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "logging-operator.fullname" . }}
namespace: {{ include "logging-operator.namespace" . }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
{{- end }}

@@ -0,0 +1,35 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "logging-admin"
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
- "logging.banzaicloud.io"
resources:
- flows
- outputs
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "logging-view"
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
- "logging.banzaicloud.io"
resources:
- flows
- outputs
- clusterflows
- clusteroutputs
verbs:
- get
- list
- watch

@@ -0,0 +1,18 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "logging.banzaicloud.io/v1beta1/ClusterFlow" false -}}
# {{- set $found "logging.banzaicloud.io/v1beta1/ClusterOutput" false -}}
# {{- set $found "logging.banzaicloud.io/v1beta1/Flow" false -}}
# {{- set $found "logging.banzaicloud.io/v1beta1/Logging" false -}}
# {{- set $found "logging.banzaicloud.io/v1beta1/Output" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}

@@ -0,0 +1,5 @@
#{{- if .Values.global.dockerRootDirectory }}
#{{- if or (hasSuffix "/containers" .Values.global.dockerRootDirectory) (hasSuffix "/" .Values.global.dockerRootDirectory) }}
#{{- required "global.dockerRootDirectory must not end with suffix: '/' or '/containers'" "" -}}
#{{- end }}
#{{- end }}

@@ -0,0 +1,156 @@
# Default values for logging-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: rancher/mirrored-banzaicloud-logging-operator
tag: 3.9.0
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
namespaceOverride: ""
annotations: {}
## Deploy CRDs used by Logging Operator.
##
createCustomResource: false
resources: {}
# We usually recommend not specifying default resources, leaving this as a conscious
# choice for the user. This also increases the chances that charts run on environments with
# limited resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: cattle.io/os
operator: "Equal"
value: "linux"
effect: NoSchedule
affinity: {}
http:
# http listen port number
port: 8080
# Service definition for query http service
service:
type: ClusterIP
clusterIP: None
# Annotations to query http service
annotations: {}
# Labels to query http service
labels: {}
# These "rbac" settings match the upstream defaults. To apply PSPs only to the overlay files
# (which include the default Logging CRs created by this chart), see the "global.psp" setting.
# To enable PSPs for the entire chart, enable both "rbac.psp" and "global.psp" (this may
# require further changes to the chart).
rbac:
enabled: true
psp:
enabled: false
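# For example, to enable PSPs chart-wide, set this together with "global.psp.enabled" further
# below (an illustrative override, not a tested default; as noted above, this may require
# further changes to the chart):
# rbac:
#   psp:
#     enabled: true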
## SecurityContext holds pod-level security attributes and common container settings.
## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
podSecurityContext: {}
# runAsNonRoot: true
# runAsUser: 1000
# fsGroup: 2000
securityContext: {}
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
# capabilities:
# drop: ["ALL"]
## Operator priorityClassName
##
priorityClassName: {}
monitoring:
# Create a Prometheus Operator ServiceMonitor object
serviceMonitor:
enabled: false
additionalLabels: {}
    metricsRelabelings: []
relabelings: []
disablePvc: true
additionalLoggingSources:
rke:
enabled: false
fluentbit:
log_level: "info"
mem_buffer_limit: "5MB"
rke2:
enabled: false
k3s:
enabled: false
container_engine: "systemd"
aks:
enabled: false
eks:
enabled: false
gke:
enabled: false
images:
config_reloader:
repository: rancher/mirrored-jimmidyson-configmap-reload
tag: v0.4.0
fluentbit:
repository: rancher/mirrored-fluent-fluent-bit
tag: 1.6.10
fluentbit_debug:
repository: rancher/mirrored-fluent-fluent-bit
tag: 1.6.10-debug
fluentd:
repository: rancher/mirrored-banzaicloud-fluentd
tag: v1.11.5-alpine-9
# These "fluentd" and "fluentbit" settings apply to every Logging CR, including vendor Logging CRs
# enabled in "additionalLoggingSources". Changing these affects every Logging CR installed.
fluentd:
resources: {}
livenessProbe:
tcpSocket:
port: 24240
initialDelaySeconds: 30
periodSeconds: 15
fluentbit:
resources: {}
tolerations:
- key: node-role.kubernetes.io/controlplane
value: "true"
effect: NoSchedule
- key: node-role.kubernetes.io/etcd
value: "true"
effect: NoExecute
global:
cattle:
systemDefaultRegistry: ""
  # Set "dockerRootDirectory" if Docker's root directory differs from the default (/var/lib/docker).
dockerRootDirectory: ""
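  # For example, if Docker's data root were moved to /mnt/docker (a hypothetical path):
  # dockerRootDirectory: /mnt/docker
  # The value must not end in "/" or "/containers"; the chart's validation template rejects those.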
# This psp setting differs from the upstream "rbac.psp" by only enabling psp settings for the
# overlay files, which include the Logging CRs created, whereas the upstream "rbac.psp" affects the
# logging operator.
psp:
enabled: true
seLinux:
enabled: false

@@ -0,0 +1,10 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-monitoring-system
catalog.cattle.io/release-name: rancher-monitoring-crd
apiVersion: v1
description: Installs the CRDs for rancher-monitoring.
name: rancher-monitoring-crd
type: application
version: 9.4.204-rc09

@@ -0,0 +1,2 @@
# rancher-monitoring-crd
A Rancher chart that installs the CRDs used by rancher-monitoring.

@@ -0,0 +1,260 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
creationTimestamp: null
name: podmonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
kind: PodMonitor
listKind: PodMonitorList
plural: podmonitors
singular: podmonitor
preserveUnknownFields: false
scope: Namespaced
validation:
openAPIV3Schema:
description: PodMonitor defines monitoring for a set of pods.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Pod selection for target discovery
by Prometheus.
properties:
jobLabel:
description: The label to use to retrieve the job name from.
type: string
namespaceSelector:
description: Selector to select which namespaces the Endpoints objects
are discovered from.
properties:
any:
description: Boolean describing whether all namespaces are selected
in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names.
items:
type: string
type: array
type: object
podMetricsEndpoints:
description: A list of endpoints allowed as part of this PodMonitor.
items:
description: PodMetricsEndpoint defines a scrapeable endpoint of a
Kubernetes Pod serving Prometheus metrics.
properties:
honorLabels:
description: HonorLabels chooses the metric's labels on collisions
with target labels.
type: boolean
honorTimestamps:
description: HonorTimestamps controls whether Prometheus respects
the timestamps present in scraped data.
type: boolean
interval:
description: Interval at which metrics should be scraped
type: string
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
params:
additionalProperties:
items:
type: string
type: array
description: Optional HTTP URL parameters
type: object
path:
description: HTTP path to scrape for metrics.
type: string
port:
description: Name of the pod port this endpoint refers to. Mutually
exclusive with targetPort.
type: string
proxyUrl:
description: ProxyURL eg http://proxyserver:2195 Directs scrapes
to proxy through this endpoint.
type: string
relabelings:
description: 'RelabelConfigs to apply to samples before ingestion.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
scheme:
description: HTTP scheme to use for scraping.
type: string
scrapeTimeout:
description: Timeout after which the scrape is ended
type: string
targetPort:
anyOf:
- type: integer
- type: string
description: 'Deprecated: Use ''port'' instead.'
x-kubernetes-int-or-string: true
type: object
type: array
podTargetLabels:
description: PodTargetLabels transfers labels on the Kubernetes Pod
onto the target.
items:
type: string
type: array
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped
samples that will be accepted.
format: int64
type: integer
selector:
description: Selector to select Pod objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains
values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to a
set of values. Valid operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator
is In or NotIn, the values array must be non-empty. If the
operator is Exists or DoesNotExist, the values array must
be empty. This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator is
"In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
required:
- podMetricsEndpoints
- selector
type: object
required:
- spec
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,91 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
creationTimestamp: null
name: prometheusrules.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
kind: PrometheusRule
listKind: PrometheusRuleList
plural: prometheusrules
singular: prometheusrule
preserveUnknownFields: false
scope: Namespaced
validation:
openAPIV3Schema:
description: PrometheusRule defines alerting rules for a Prometheus instance
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired alerting rule definitions for Prometheus.
properties:
groups:
description: Content of Prometheus rule file
items:
description: 'RuleGroup is a list of sequentially evaluated recording
and alerting rules. Note: PartialResponseStrategy is only used by
ThanosRuler and will be ignored by Prometheus instances. Valid
values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response'
properties:
interval:
type: string
name:
type: string
partial_response_strategy:
type: string
rules:
items:
description: Rule describes an alerting or recording rule.
properties:
alert:
type: string
annotations:
additionalProperties:
type: string
type: object
expr:
anyOf:
- type: integer
- type: string
x-kubernetes-int-or-string: true
for:
type: string
labels:
additionalProperties:
type: string
type: object
record:
type: string
required:
- expr
type: object
type: array
required:
- name
- rules
type: object
type: array
type: object
required:
- spec
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,459 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.4
creationTimestamp: null
name: servicemonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
kind: ServiceMonitor
listKind: ServiceMonitorList
plural: servicemonitors
singular: servicemonitor
preserveUnknownFields: false
scope: Namespaced
validation:
openAPIV3Schema:
description: ServiceMonitor defines monitoring for a set of services.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Service selection for target discovery
by Prometheus.
properties:
endpoints:
description: A list of endpoints allowed as part of this ServiceMonitor.
items:
description: Endpoint defines a scrapeable endpoint serving Prometheus
metrics.
properties:
basicAuth:
description: 'BasicAuth allow an endpoint to authenticate over
basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints'
properties:
password:
description: The secret in the service monitor namespace that
contains the password for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
username:
description: The secret in the service monitor namespace that
contains the username for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
type: object
bearerTokenFile:
description: File to read bearer token for scraping targets.
type: string
bearerTokenSecret:
description: Secret to mount to read bearer token for scraping
targets. The secret needs to be in the same namespace as the
service monitor and accessible by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
honorLabels:
description: HonorLabels chooses the metric's labels on collisions
with target labels.
type: boolean
honorTimestamps:
description: HonorTimestamps controls whether Prometheus respects
the timestamps present in scraped data.
type: boolean
interval:
description: Interval at which metrics should be scraped
type: string
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
params:
additionalProperties:
items:
type: string
type: array
description: Optional HTTP URL parameters
type: object
path:
description: HTTP path to scrape for metrics.
type: string
port:
description: Name of the service port this endpoint refers to.
Mutually exclusive with targetPort.
type: string
proxyUrl:
description: ProxyURL eg http://proxyserver:2195 Directs scrapes
to proxy through this endpoint.
type: string
relabelings:
description: 'RelabelConfigs to apply to samples before scraping.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
scheme:
description: HTTP scheme to use for scraping.
type: string
scrapeTimeout:
description: Timeout after which the scrape is ended
type: string
targetPort:
anyOf:
- type: integer
- type: string
description: Name or number of the pod port this endpoint refers
to. Mutually exclusive with port.
x-kubernetes-int-or-string: true
tlsConfig:
description: TLS configuration to use when scraping the endpoint
properties:
ca:
                        description: Struct containing the CA cert to use for the targets.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Struct containing the client cert file for the
targets.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
type: object
type: array
jobLabel:
description: The label to use to retrieve the job name from.
type: string
namespaceSelector:
description: Selector to select which namespaces the Endpoints objects
are discovered from.
properties:
any:
description: Boolean describing whether all namespaces are selected
in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names.
items:
type: string
type: array
type: object
podTargetLabels:
description: PodTargetLabels transfers labels on the Kubernetes Pod
onto the target.
items:
type: string
type: array
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped
samples that will be accepted.
format: int64
type: integer
selector:
description: Selector to select Endpoints objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains
values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to a
set of values. Valid operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator
is In or NotIn, the values array must be non-empty. If the
operator is Exists or DoesNotExist, the values array must
be empty. This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator is
"In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
targetLabels:
description: TargetLabels transfers labels on the Kubernetes Service
onto the target.
items:
type: string
type: array
required:
- endpoints
- selector
type: object
required:
- spec
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -0,0 +1,29 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# Windows Support
{{/*
Windows cluster will add default taint for linux nodes,
add below linux tolerations to workloads could be scheduled to those linux nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
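{{/*
Nodes on Kubernetes < 1.14 only carry the beta OS label, so the selector below falls back
to it there; 1.14+ clusters use the GA kubernetes.io/os label.
*/}}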
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}

@@ -0,0 +1,96 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}-create
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}
annotations:
"helm.sh/hook": post-install, post-upgrade, post-rollback
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
name: {{ .Chart.Name }}-create
labels:
app: {{ .Chart.Name }}
spec:
serviceAccountName: {{ .Chart.Name }}-manager
securityContext:
runAsNonRoot: true
runAsUser: 1000
containers:
- name: create-crds
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/kubectl
- apply
- -f
- /etc/config/crd-manifest.yaml
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
restartPolicy: OnFailure
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
volumes:
- name: crd-manifest
configMap:
name: {{ .Chart.Name }}-manifest
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Chart.Name }}-delete
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
name: {{ .Chart.Name }}-delete
labels:
app: {{ .Chart.Name }}
spec:
serviceAccountName: {{ .Chart.Name }}-manager
securityContext:
runAsNonRoot: true
runAsUser: 1000
initContainers:
- name: remove-finalizers
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/kubectl
- apply
- -f
- /etc/config/crd-manifest.yaml
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
containers:
- name: delete-crds
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: IfNotPresent
command:
- /bin/kubectl
- delete
- -f
- /etc/config/crd-manifest.yaml
volumeMounts:
- name: crd-manifest
readOnly: true
mountPath: /etc/config
restartPolicy: OnFailure
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
volumes:
- name: crd-manifest
configMap:
name: {{ .Chart.Name }}-manifest

@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Chart.Name }}-manifest
namespace: {{ .Release.Namespace }}
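# Concatenates every YAML file under crd-manifest/ into one multi-document manifest;
# the $currentScope / "with" pattern restores the chart root scope (and .Files) inside the range.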
data:
crd-manifest.yaml: |
{{- $currentScope := . -}}
{{- $crds := (.Files.Glob "crd-manifest/**.yaml") -}}
{{- range $path, $_ := $crds -}}
{{- with $currentScope -}}
{{ .Files.Get $path | nindent 4 }}
---
{{- end -}}{{- end -}}

@@ -0,0 +1,72 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Chart.Name }}-manager
labels:
app: {{ .Chart.Name }}-manager
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs: ['create', 'get', 'patch', 'delete']
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ .Chart.Name }}-manager
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Chart.Name }}-manager
labels:
app: {{ .Chart.Name }}-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Chart.Name }}-manager
subjects:
- kind: ServiceAccount
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}-manager
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ .Chart.Name }}-manager
namespace: {{ .Release.Namespace }}
labels:
app: {{ .Chart.Name }}-manager
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'configMap'
- 'secret'

@@ -0,0 +1,11 @@
# Default values for rancher-monitoring-crd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
cattle:
systemDefaultRegistry: ""
image:
repository: rancher/kubectl
tag: v1.20.2

@@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# helm/charts
OWNERS
hack/
ci/
kube-prometheus-*.tgz

@@ -0,0 +1,47 @@
# Changelog
All notable changes from the upstream Prometheus Operator chart will be added to this file.
## [Package Version 00] - 2020-07-19
### Added
- Added [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) as a dependency to the upstream Prometheus Operator chart to allow users to expose custom metrics from the default Prometheus instance deployed by this chart
- Removed `prometheus-operator/cleanup-crds.yaml` and `prometheus-operator/crds.yaml` from the Prometheus Operator upstream chart in favor of just using the CRD directory to install the CRDs.
- Added support for `rkeControllerManager`, `rkeScheduler`, `rkeProxy`, and `rkeEtcd` PushProx exporters for monitoring k8s components within RKE clusters
- Added support for a `k3sServer` PushProx exporter that monitors k3s server components (`kubeControllerManager`, `kubeScheduler`, and `kubeProxy`) within k3s clusters
- Added support for `kubeAdmControllerManager`, `kubeAdmScheduler`, `kubeAdmProxy`, and `kubeAdmEtcd` PushProx exporters for monitoring k8s components within kubeAdm clusters
- Added support for `rke2ControllerManager`, `rke2Scheduler`, `rke2Proxy`, and `rke2Etcd` PushProx exporters for monitoring k8s components within rke2 clusters
- Exposed `prometheus.prometheusSpec.ignoreNamespaceSelectors` on values.yaml and set it to `false` by default. This value instructs the default Prometheus server deployed with this chart to ignore the `namespaceSelector` field within any created ServiceMonitor or PodMonitor CRs that it selects. This prevents ServiceMonitors and PodMonitors from configuring the Prometheus scrape configuration to monitor resources outside the namespace that they are deployed in; if a user needs to have one ServiceMonitor / PodMonitor monitor resources within several namespaces (such as the resources that are used to monitor Istio in a default installation), they should not enable this option since it would require them to create one ServiceMonitor / PodMonitor CR per namespace that they would like to monitor. Relevant fields were also updated in the default README.md.
- Added `grafana.sidecar.dashboards.searchNamespace` to `values.yaml` with a default value of `cattle-dashboards`. The namespace provided should contain all ConfigMaps with the label `grafana_dashboard` and will be searched by the Grafana Dashboards sidecar for updates. The namespace specified is also created along with this deployment. All default dashboard ConfigMaps have been relocated from the deployment namespace to the namespace specified
- Added `monitoring-admin`, `monitoring-edit`, and `monitoring-view` default `ClusterRoles` to allow admins to assign roles to users to interact with Prometheus Operator CRs. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `ClusterRoleBinding` to bind these roles to a Subject to allow them to set up or view `ServiceMonitors` / `PodMonitors` / `PrometheusRules` and view `Prometheus` or `Alertmanager` CRs across the cluster (a sketch of such a binding is shown after this list). If `.Values.global.rbac.userRoles.aggregateRolesForRBAC` is enabled, these ClusterRoles will aggregate into the respective default ClusterRoles provided by Kubernetes
- Added `monitoring-config-admin`, `monitoring-config-edit` and `monitoring-config-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `Secrets` and `ConfigMaps` within the `cattle-monitoring-system` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-monitoring-system` namespace to allow them to modify Secrets / ConfigMaps tied to the deployment, such as your Alertmanager Config Secret.
- Added `monitoring-dashboard-admin`, `monitoring-dashboard-edit` and `monitoring-dashboard-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `ConfigMaps` within the `cattle-dashboards` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`) and deploying Grafana as part of this chart. In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-dashboards` namespace to allow them to create / modify ConfigMaps that contain the JSON used to persist Grafana Dashboards on the cluster.
- Added default resource limits for `Prometheus Operator`, `Prometheus`, `AlertManager`, `Grafana`, `kube-state-metrics`, `node-exporter`
- Added a default template `rancher_defaults.tmpl` to AlertManager that Rancher will offer to users in order to help configure the way alerts are rendered on a notifier. Also updated the default template deployed with this chart to reference that template and added an example of a Slack config using this template as a comment in the `values.yaml`.
- Added support for private registries via introducing a new field for `global.cattle.systemDefaultRegistry` that, if supplied, will automatically be prepended onto every image used by the chart.
- Added a default `nginx` proxy container deployed with Grafana whose config is set in the `ConfigMap` located in `charts/grafana/templates/nginx-config.yaml`. The purpose of this container is to make it possible to view Grafana's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8080` (with a `portName` of `nginx-http` instead of the default `service`), which is also where the Grafana service will now point to, and will forward all requests to the Grafana container listening on the default port `3000`.
- Added a default `nginx` proxy container deployed with Prometheus whose config is set in the `ConfigMap` located in `templates/prometheus/nginx-config.yaml`. The purpose of this container is to make it possible to view Prometheus's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8080` (with a `portName` of `nginx-http` instead of the default `web`), which is also where the Prometheus service will now point to, and will forward all requests to the Prometheus container listening on the default port `9090`.
- Added support for passing CIS Scans in a hardened cluster by introducing a Job that patches the default service account within the `cattle-monitoring-system` and `cattle-dashboards` namespaces on install or upgrade and adding a default allow all `NetworkPolicy` to the `cattle-monitoring-system` and `cattle-dashboards` namespaces.
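As a sketch, a binding for the `monitoring-view` role described above might look like the following (the binding and subject names are illustrative; the chart does not create this object):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: jane-monitoring-view        # illustrative name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: monitoring-view             # default ClusterRole added by this chart
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: jane                        # illustrative subject
```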
### Modified
- Updated the chart name from `prometheus-operator` to `rancher-monitoring` and added the `io.rancher.certified: rancher` annotation to `Chart.yaml`
- Modified the default `node-exporter` port from `9100` to `9796`
- Modified the default `nameOverride` to `rancher-monitoring`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified
- Modified the default `namespaceOverride` to `cattle-monitoring-system`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified
- Configured some default values for `grafana.service` values and exposed them in the default README.md
- The default namespaces of the following ServiceMonitors were changed from the deployment namespace so that they can continue to monitor metrics when `prometheus.prometheusSpec.ignoreNamespaceSelectors` is enabled:
- `core-dns`: `kube-system`
- `api-server`: `default`
- `kube-controller-manager`: `kube-system`
- `kubelet`: `{{ .Values.kubelet.namespace }}`
- Disabled the following deployments by default (can be enabled if required):
- `AlertManager`
- `kube-controller-manager` metrics exporter
- `kube-etcd` metrics exporter
- `kube-scheduler` metrics exporter
- `kube-proxy` metrics exporter
- Updated default Grafana `deploymentStrategy` to `Recreate` to prevent deployments from being stuck on upgrade if a PV is attached to Grafana
- Modified `<serviceMonitor|podMonitor|rule>SelectorNilUsesHelmValues` to default to `false`. As a result, we look for all CRs with any labels in all namespaces by default rather than just the ones tagged with the label `release: rancher-monitoring` (a sketch of reverting this for ServiceMonitors appears after this list).
- Modified the default images used by the `rancher-monitoring` chart to point to Rancher mirrors of the original images from upstream.
- Modified the behavior of the chart to create the Alertmanager Config Secret via a pre-install hook instead of using the normal Helm lifecycle to manage the secret. The benefit of this approach is that all changes to the Config Secret done on a live cluster will never get overridden on a `helm upgrade` since the secret only gets created on a `helm install`. If you would like the secret to be cleaned up on a `helm uninstall`, enable `alertmanager.cleanupOnUninstall`; however, this is disabled by default to prevent the loss of alerting configuration on an uninstall. This secret will never be modified on a `helm upgrade`.
- Modified the default `securityContext` for `Pod` templates across the chart to `{"runAsNonRoot": "true", "runAsUser": "1000"}` and replaced `grafana.rbac.pspUseAppArmor` in favor of `grafana.rbac.pspAnnotations={}` in order to make it possible to deploy this chart on a hardened cluster which does not support Seccomp or AppArmor annotations in PSPs. Users can always choose to specify the annotations they want to use for the PSP directly as part of the values provided.
- Modified `.Values.prometheus.prometheusSpec.containers` to take in a string representing a template that should be rendered by Helm (via `tpl`) instead of allowing a user to provide YAML directly.
- Modified the default Grafana configuration to auto assign users who access Grafana to the Viewer role and enable anonymous access to Grafana dashboards by default. This default works well for a Rancher user who is accessing Grafana via the `kubectl proxy` on the Rancher Dashboard UI since anonymous users who enter via the proxy are authenticated by the k8s API Server, but you can / should modify this behavior if you plan on exposing Grafana in a way that does not require authentication (e.g. as a `NodePort` service).
- Modified the default Grafana configuration to add a default dashboard for Rancher on the Grafana home page.
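As a sketch, reverting to the upstream ServiceMonitor selection behavior would be a values override like the following (illustrative; equivalent keys exist for PodMonitors and rules):

```yaml
prometheus:
  prometheusSpec:
    # Upstream default; this chart ships false (see the note above).
    serviceMonitorSelectorNilUsesHelmValues: true
```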

@@ -0,0 +1,12 @@
# Contributing Guidelines
## How to contribute to this chart
1. Fork this repository, develop and test your Chart.
1. Bump the chart version for every change.
1. Ensure PR title has the prefix `[kube-prometheus-stack]`
1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories
1. The `hack/minikube` folder has scripts to set up minikube and the components of this chart so that all components can be scraped. You can use this configuration when validating your changes.
1. Check for changes of RBAC rules.
1. Check for changes in CRD specs.
1. PR must pass the linter (`helm lint`)

@@ -0,0 +1,46 @@
annotations:
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
- name: Upstream Project
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
catalog.cattle.io/auto-install: rancher-monitoring-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Monitoring
catalog.cattle.io/namespace: cattle-monitoring-system
catalog.cattle.io/provides-gvr: monitoring.coreos.com.prometheus/v1
catalog.cattle.io/release-name: rancher-monitoring
catalog.cattle.io/requests-cpu: 4500m
catalog.cattle.io/requests-memory: 4000Mi
catalog.cattle.io/ui-component: monitoring
apiVersion: v1
appVersion: 0.38.1
description: Collects several related Helm charts, Grafana dashboards, and Prometheus
rules combined with documentation and scripts to provide easy to operate end-to-end
Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
home: https://github.com/prometheus-operator/kube-prometheus
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
keywords:
- operator
- prometheus
- kube-prometheus
- monitoring
maintainers:
- name: vsliouniaev
- name: bismarck
- email: gianrubio@gmail.com
name: gianrubio
- email: github.gkarthiks@gmail.com
name: gkarthiks
- email: scott@r6by.com
name: scottrigby
- email: miroslav.hadzhiev@gmail.com
name: Xtigyro
- email: arvind.iyengar@suse.com
name: Arvind
name: rancher-monitoring
sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
version: 9.4.204-rc09

@@ -0,0 +1,346 @@
# kube-prometheus-stack
Installs the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).
See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) README for details about components, dashboards, and alerts.
_Note: This chart was formerly named the `prometheus-operator` chart; it was renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component._
## Prerequisites
- Kubernetes 1.10+ with Beta APIs
- Helm 2.12+ (If using Helm < 2.14, [see below for CRD workaround](#helm-fails-to-create-crds))
## Get Repo Info
```console
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo add stable https://charts.helm.sh/stable/
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Install Chart
```console
# Helm 3
$ helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack
# Helm 2
$ helm install --name [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Dependencies
By default this chart installs additional, dependent charts:
- [stable/kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics)
- [stable/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/prometheus-node-exporter)
- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana)
To disable dependencies during installation, see [multiple releases](#multiple-releases) below.
_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
## Uninstall Chart
```console
# Helm 3
$ helm uninstall [RELEASE_NAME]
# Helm 2
$ helm delete --purge [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
CRDs created by this chart are not removed by default and should be manually cleaned up:
```console
kubectl delete crd prometheuses.monitoring.coreos.com
kubectl delete crd prometheusrules.monitoring.coreos.com
kubectl delete crd servicemonitors.monitoring.coreos.com
kubectl delete crd podmonitors.monitoring.coreos.com
kubectl delete crd alertmanagers.monitoring.coreos.com
kubectl delete crd thanosrulers.monitoring.coreos.com
```
## Upgrading Chart
```console
# Helm 3 or 2
$ helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading an existing Release to a new major version
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 8.x to 9.x
Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration.
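A minimal values sketch of the new approach (the secret name and key below are hypothetical and must match a Secret you manage yourself):
```yaml
prometheus:
  prometheusSpec:
    additionalScrapeConfigsSecret:
      enabled: true
      name: my-scrape-configs                # hypothetical Secret name
      key: additional-scrape-configs.yaml    # key inside that Secret
```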
### From 7.x to 8.x
Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x
```sh
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0
```
Then upgrade to 8.x.x
```sh
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x]
```
The minimal recommended Prometheus version for this chart release is `2.12.x`.
### From 6.x to 7.x
Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0.
### From 5.x to 6.x
Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. If this is not done an error will occur indicating that the deployment cannot be modified:
```console
invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
```
If this error has already been encountered, use `helm history` to determine which release last worked, then `helm rollback` to that release, and finally `helm upgrade --force` to the new one.
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
```console
helm show values prometheus-community/kube-prometheus-stack
```
You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.
### Rancher Monitoring Configuration
The following table shows values exposed by Rancher Monitoring's additions to the chart:
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `nameOverride` | Provide a name that should be used instead of the chart name when naming all resources deployed by this chart |`"rancher-monitoring"`|
| `namespaceOverride` | Override the deployment namespace | `"cattle-monitoring-system"` |
| `global.rbac.userRoles.create` | Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets | `true` |
| `global.rbac.userRoles.aggregateToDefaultRoles` | Aggregate default user ClusterRoles into default k8s ClusterRoles | `true` |
| `prometheus-adapter.enabled` | Whether to install [prometheus-adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) within the cluster | `true` |
| `prometheus-adapter.prometheus.url` | A URL pointing to the Prometheus deployment within your cluster. The default value is set based on the assumption that you plan to deploy the default Prometheus instance from this chart where `.Values.namespaceOverride=cattle-monitoring-system` and `.Values.nameOverride=rancher-monitoring` | `http://rancher-monitoring-prometheus.cattle-monitoring-system.svc` |
| `prometheus-adapter.prometheus.port` | The port on the Prometheus deployment that Prometheus Adapter can make requests to | `9090` |
| `prometheus.prometheusSpec.ignoreNamespaceSelectors` | Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs. If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into | `false` |
| `alertmanager.secret.cleanupOnUninstall` | Whether to trigger a job on `helm uninstall` that cleans up the Alertmanager config secret. By default, this is disabled to prevent the loss of alerting configuration on an uninstall. | `false` |
| `alertmanager.secret.image.pullPolicy` | Image pull policy for job(s) related to alertmanager config secret's lifecycle | `IfNotPresent` |
| `alertmanager.secret.image.repository` | Repository to use for job(s) related to alertmanager config secret's lifecycle | `rancher/rancher-agent` |
| `alertmanager.secret.image.tag` | Tag to use for job(s) related to alertmanager config secret's lifecycle | `v2.4.8` |
The following values are enabled for different distributions via [rancher-pushprox](https://github.com/rancher/dev-charts/tree/master/packages/rancher-pushprox). See the rancher-pushprox `README.md` for more information on all of the values that can be configured for the PushProxy chart; a sample override is sketched after the table below.
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `rkeControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in RKE clusters | `false` |
| `rkeScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in RKE clusters | `false` |
| `rkeProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in RKE clusters | `false` |
| `rkeEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in RKE clusters | `false` |
| `k3sServer.enabled` | Create a PushProx installation for monitoring k3s-server metrics (accounts for kube-controller-manager, kube-scheduler, and kube-proxy metrics) in k3s clusters | `false` |
| `kubeAdmControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in kubeAdm clusters | `false` |
| `kubeAdmScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in kubeAdm clusters | `false` |
| `kubeAdmProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in kubeAdm clusters | `false` |
| `kubeAdmEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in kubeAdm clusters | `false` |
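For example, enabling the RKE-specific PushProx exporters might look like the following sketch (the release name and repository alias are placeholders):
```console
helm upgrade --install rancher-monitoring rancher-charts/rancher-monitoring \
  --set rkeControllerManager.enabled=true \
  --set rkeScheduler.enabled=true \
  --set rkeProxy.enabled=true \
  --set rkeEtcd.enabled=true
```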
### Multiple releases
The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`.
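For example, a second release with those dependencies disabled could be installed as follows (a sketch; the release name is a placeholder):
```console
helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack \
  --set kubeStateMetrics.enabled=false \
  --set nodeExporter.enabled=false \
  --set grafana.enabled=false
```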
## Work-Arounds for Known Issues
### Running on private GKE clusters
When Google configures the control plane for private clusters, it automatically configures VPC peering between your Kubernetes cluster's network and a separate Google-managed project. In order to restrict what Google is able to access within your cluster, the firewall rules that are configured restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod.
You can read more about how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules).
Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`.
### Helm fails to create CRDs
You should upgrade to Helm 2.14+ in order to avoid this issue. However, if you are stuck with an earlier Helm release, you should instead use the following approach: due to a bug in Helm, it is possible for the six CRDs that are created by this chart to fail to be fully deployed before Helm attempts to create resources that require them. This affects all versions of Helm, with a [potential fix pending](https://github.com/helm/helm/pull/5112). In order to work around this issue when installing the chart, you will need to make sure all six CRDs exist in the cluster first and disable their provisioning by the chart:
1. Create CRDs
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
2. Wait for the CRDs to be created, which should only take a few seconds.
3. [Install](#install-chart) the chart, but disable the CRD provisioning by setting `prometheusOperator.createCustomResource` to `false`.
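With Helm 3, for example, this looks like the following (the release name is a placeholder):
```console
helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack \
  --set prometheusOperator.createCustomResource=false
```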
## PrometheusRules Admission Webhooks
With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster.
### How the Chart Configures the Hooks
A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end-user certificates. If the certificate already exists, the hook exits.
2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up (it does not yet have the correct CA field set).
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process allows a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations.
### Alternatives
It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested.
### Limitations
Because the operator can only run as a single pod, there is potential for a failure of this component to cause rule deployment to fail. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default.
## Developing Prometheus Rules and Grafana Dashboards
This chart's Grafana dashboards and Prometheus rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes, you need to first [add them to the original repo](https://github.com/prometheus-operator/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them here using those scripts.
## Further Information
For more in-depth documentation of configuration options meanings, please see
- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
- [Prometheus](https://prometheus.io/docs/introduction/overview/)
- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart)
## prometheus.io/scrape
The Prometheus Operator does not support annotation-based discovery of services, using the `ServiceMonitor` CRD in its place as it provides far more configuration options. For information on how to use ServiceMonitors, please see the `prometheus-operator/prometheus-operator` documentation: [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md)
By default, Prometheus discovers ServiceMonitors within its namespace that are labeled with the same release tag as the prometheus-operator release. Sometimes you may need to discover custom ServiceMonitors, for example ones used to scrape data from third-party applications. An easy way of doing this, without compromising the default ServiceMonitor discovery, is to allow Prometheus to discover all ServiceMonitors within its namespace without applying label filtering. To do so, set `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.
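The equivalent values.yaml snippet for the setting named above is:
```yaml
prometheus:
  prometheusSpec:
    serviceMonitorSelectorNilUsesHelmValues: false
```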
## Migrating from coreos/prometheus-operator chart
The multiple charts have been combined into a single chart that installs Prometheus Operator, Prometheus, Alertmanager, and Grafana, as well as the multitude of exporters necessary to monitor a cluster.
There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support.
The capabilities of the old chart are all available in the new chart, including the ability to run multiple Prometheus instances on a single cluster; you will need to disable the parts of the chart you do not wish to deploy.
You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765).
### High-level overview of Changes
#### Added dependencies
The chart has added 3 [dependencies](#dependencies).
- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart and are relatively simple components.
- Grafana: The Grafana chart is more feature-rich than this chart; it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information, check out the [documentation for the chart](https://github.com/helm/charts/tree/master/stable/grafana).
#### CoreOS CRDs
The CRDs are provisioned using a separate chart installation within the Helm chart `rancher-monitoring-crd` that is packaged alongside this chart.
#### Kubelet Service
Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice.
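A sketch of that cleanup; the old service name varies by release, so it is shown as a placeholder:
```console
# Find the stale kubelet service left behind by the old chart, then delete it.
kubectl --namespace kube-system get services
kubectl --namespace kube-system delete service <old-kubelet-service>
```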
#### Persistent Volumes
If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created:
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pvc-prometheus-migration-prometheus-0
spec:
accessModes:
- ReadWriteOnce
azureDisk:
cachingMode: None
diskName: pvc-prometheus-migration-prometheus-0
diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0
fsType: ""
kind: Managed
readOnly: false
capacity:
storage: 1Gi
persistentVolumeReclaimPolicy: Delete
storageClassName: prometheus
volumeMode: Filesystem
```
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app: prometheus
prometheus: prometheus-migration-prometheus
name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
namespace: monitoring
spec:
accessModes:
- ReadWriteOnce
dataSource: null
resources:
requests:
storage: 1Gi
storageClassName: prometheus
volumeMode: Filesystem
volumeName: pvc-prometheus-migration-prometheus-0
status:
accessModes:
- ReadWriteOnce
capacity:
storage: 1Gi
```
The PVC will take ownership of the PV, and when you create a release using a persistent volume claim template, it will use the existing PVCs if they match the naming convention used by the chart. Similar approaches can be used for other cloud providers.
#### KubeProxy
The metrics bind address of kube-proxy defaults to `127.0.0.1:10249`, which Prometheus instances **cannot** access. If you want to collect these metrics, you should expose them by changing the `metricsBindAddress` field value to `0.0.0.0:10249`.
Depending on the cluster, the relevant part of `config.conf` will be in the ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example:
```console
kubectl -n kube-system edit cm kube-proxy
```
```yaml
apiVersion: v1
data:
config.conf: |-
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# ...
# metricsBindAddress: 127.0.0.1:10249
metricsBindAddress: 0.0.0.0:10249
# ...
kubeconfig.conf: |-
# ...
kind: ConfigMap
metadata:
labels:
app: kube-proxy
name: kube-proxy
namespace: kube-system
```


@ -0,0 +1,15 @@
# Rancher Monitoring and Alerting
This chart is based on the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) chart. The chart deploys [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) and its CRDs along with [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana), [Prometheus Adapter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter) and additional charts / Kubernetes manifests to gather metrics. It allows users to monitor their Kubernetes clusters, view metrics in Grafana dashboards, and set up alerts and notifications.
For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/).
The chart installs the following components:
- [Prometheus Operator](https://github.com/coreos/prometheus-operator) - The operator provides easy monitoring definitions for Kubernetes services, manages [Prometheus](https://prometheus.io/) and [AlertManager](https://prometheus.io/docs/alerting/latest/alertmanager/) instances, and adds default scrape targets for some Kubernetes components.
- [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/) - A collection of community-curated Kubernetes manifests, Grafana Dashboards, and PrometheusRules that deploy a default end-to-end cluster monitoring configuration.
- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) - Grafana allows a user to create / view dashboards based on the cluster metrics collected by Prometheus.
- [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) / [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) / [rancher-pushprox](https://github.com/rancher/charts/tree/dev-v2.5/packages/rancher-pushprox/charts) - These charts monitor various Kubernetes components across different Kubernetes cluster types.
- [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) - The adapter allows a user to expose custom metrics, resource metrics, and external metrics on the default [Prometheus](https://prometheus.io/) instance to the Kubernetes API Server.
For more information, review the Helm README of this chart.


@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.vscode
.project
.idea/
*.tmproj
OWNERS


@ -0,0 +1,17 @@
apiVersion: v1
appVersion: 7.1.5
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
kubeVersion: ^1.8.0-0
maintainers:
- email: zanhsieh@gmail.com
name: zanhsieh
- email: rluckie@cisco.com
name: rtluckie
- email: maor.friedman@redhat.com
name: maorfr
name: grafana
sources:
- https://github.com/grafana/grafana
version: 5.6.4


@ -0,0 +1,424 @@
# Grafana Helm Chart
* Installs the web dashboarding system [Grafana](http://grafana.org/)
## TL;DR
```console
helm install grafana/grafana
```
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install --name my-release grafana/grafana
```
## Uninstalling the Chart
To uninstall/delete the my-release deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Upgrading an existing Release to a new major version
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
incompatible breaking change needing manual actions.
### To 4.0.0 (And 3.12.1)
This version requires Helm >= 2.12.0.
### To 5.0.0
You have to add --force to your helm upgrade command as the labels of the chart have changed.
## Configuration
| Parameter | Description | Default |
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `replicas` | Number of nodes | `1` |
| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` |
| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` |
| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` |
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`|
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Image tag (`Must be >= 5.0.0`) | `7.0.3` |
| `image.sha` | Image sha (optional) | `17cbd08b9515fda889ca959e9d72ee6f3327c8f1844a3336dfd952134f38e2fe` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets | `{}` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed | `80` |
| `service.portName` | Name of the port on the service | `service` |
| `service.targetPort` | Internal port that the service targets | `3000` |
| `service.nodePort` | Kubernetes service nodePort | `nil` |
| `service.annotations` | Service annotations | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.clusterIP` | internal cluster service IP | `nil` |
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` |
| `service.externalIPs` | service external IP addresses | `[]` |
| `extraExposePorts` | Additional service ports for sidecar containers| `[]` |
| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations (values are templated) | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress accepted path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` |
| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` |
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` |
| `initChownData.enabled` | If false, don't reset data ownership at startup | true |
| `initChownData.image.repository` | init-chown-data container image repository | `busybox` |
| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` |
| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` |
| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` |
| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` |
| `schedulerName` | Alternate scheduler name | `nil` |
| `env` | Extra environment variables passed to pods | `{}` |
| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` |
| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
| `envRenderSecret` | Sensitive environment variables passed to pods and stored as a secret | `{}` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
| `notifiers` | Configure grafana notifiers | `{}` |
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
| `dashboards` | Dashboards to import | `{}` |
| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
| `grafana.ini` | Grafana's primary configuration | `{}` |
| `ldap.enabled` | Enable LDAP authentication | `false` |
| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file; this must have the key `ldap-toml`. | `""` |
| `ldap.config` | Grafana's LDAP configuration | `""` |
| `annotations` | Deployment annotations | `{}` |
| `labels` | Deployment labels | `{}` |
| `podAnnotations` | Pod annotations | `{}` |
| `podLabels` | Pod labels | `{}` |
| `podPortName` | Name of the grafana port on the pod | `grafana` |
| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` |
| `sidecar.image.tag` | Sidecar image tag | `0.1.151` |
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
| `sidecar.resources` | Sidecar resources | `{}` |
| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` |
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` |
| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
| `sidecar.dashboards.provider.type` | Provider type | `file` |
| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request; with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
| `sidecar.dashboards.defaultFolderName` | The default folder name; it will create a subfolder under `sidecar.dashboards.folder` and put dashboards there instead | `nil` |
| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana |`false` |
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` |
| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
| `serviceAccount.annotations` | ServiceAccount annotations | |
| `serviceAccount.create` | Create service account | `true` |
| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` |
| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` |
| `rbac.create` | Create and use RBAC resources | `true` |
| `rbac.namespaced` | Creates Role and RoleBinding instead of the default ClusterRole and ClusterRoleBindings for the grafana instance | `false` |
| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` |
| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` |
| `rbac.extraRoleRules` | Additional rules to add to the Role | [] |
| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] |
| `command` | Define command to be executed by grafana container at startup | `nil` |
| `testFramework.enabled` | Whether to create test-related resources | `true` |
| `testFramework.image` | `test-framework` image repository. | `bats/bats` |
| `testFramework.tag` | `test-framework` image tag. | `v1.1.0` |
| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` |
| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` |
| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` |
| `downloadDashboardsImage.tag` | Curl docker image tag | `7.70.0` |
| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` |
| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` |
| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) |
| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` |
| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | |
| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` |
| `serviceMonitor.path` | Path to scrape | `/metrics` |
| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` |
| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` |
| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` |
### Example ingress with path
With grafana 6.3 and above
```yaml
grafana.ini:
server:
domain: monitoring.example.com
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
ingress:
enabled: true
hosts:
- "monitoring.example.com"
path: "/grafana"
```
### Example of extraVolumeMounts
```yaml
extraVolumeMounts:
  - name: plugins
    mountPath: /var/lib/grafana/plugins
    subPath: configs/grafana/plugins
    existingClaim: existing-grafana-claim
    readOnly: false
```
## Import dashboards
There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method:
```yaml
dashboards:
default:
some-dashboard:
json: |
{
"annotations":
...
# Complete json file here
...
"title": "Some Dashboard",
"uid": "abcd1234",
"version": 1
}
custom-dashboard:
# This is a path to a file inside the dashboards directory inside the chart directory
file: dashboards/custom-dashboard.json
prometheus-stats:
# Ref: https://grafana.com/dashboards/2
gnetId: 2
revision: 2
datasource: Prometheus
local-dashboard:
url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
```
## BASE64 dashboards
Dashboards could be stored on a server that does not return JSON directly and instead returns a Base64-encoded file (e.g. Gerrit). A new parameter has been added to the `url` use case: if you specify a `b64content` value equal to `true` after the `url` entry, Base64 decoding is applied before the file is saved to disk. If this entry is not set, or is equal to `false`, no decoding is applied to the file before saving it to disk.
### Gerrit use case
The Gerrit API for downloading files has the following schema: <https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content>, where {project-name} and {file-id} usually have '/' in their values, so those MUST be replaced by %2F. So if project-name is user/repo, branch-id is master, and file-id is equal to dir1/dir2/dashboard, the url value is <https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content>. A sketch of the corresponding values entry follows.
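A minimal sketch of such a dashboard entry, reusing the Gerrit URL above (the `gerrit-dashboard` name is hypothetical):
```yaml
dashboards:
  default:
    gerrit-dashboard:
      url: https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
      b64content: true
```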
## Sidecar for dashboards
If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
dashboards are deleted/updated.
A recommendation is to use one configmap per dashboard, as removing one of several dashboards stored inside a single configmap is currently not properly mirrored in grafana.
Example dashboard config:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-dashboard
labels:
grafana_dashboard: "1"
data:
k8s-dashboard.json: |-
[...]
```
## Sidecar for datasources
If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the data sources in grafana can be imported. The secrets must be created before `helm install` so
that the datasources init container can list the secrets.
Secrets are recommended over configmaps for this use case because datasources usually contain private
data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
```yaml
apiVersion: v1
kind: Secret
metadata:
name: sample-grafana-datasource
labels:
grafana_datasource: "1"
type: Opaque
stringData:
datasource.yaml: |-
# config file version
apiVersion: 1
# list of datasources that should be deleted from the database
deleteDatasources:
- name: Graphite
orgId: 1
# list of datasources to insert/update depending
# whats available in the database
datasources:
# <string, required> name of the datasource. Required
- name: Graphite
# <string, required> datasource type. Required
type: graphite
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
url: http://localhost:8080
# <string> database password, if used
password:
# <string> database user, if used
user:
# <string> database name, if used
database:
# <bool> enable/disable basic auth
basicAuth:
# <string> basic auth username
basicAuthUser:
# <string> basic auth password
basicAuthPassword:
# <bool> enable/disable with credentials headers
withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault:
# <map> fields that will be converted to json and stored in json_data
jsonData:
graphiteVersion: "1.1"
tlsAuth: true
tlsAuthWithCACert: true
# <string> json object of data that will be encrypted.
secureJsonData:
tlsCACert: "..."
tlsClientCert: "..."
tlsClientKey: "..."
version: 1
# <bool> allow users to edit datasources from the UI.
editable: false
```
## Sidecar for notifiers
If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the notification channels in grafana can be imported. The secrets must be created before
`helm install` so that the notifiers init container can list the secrets.
Secrets are recommended over configmaps for this use case because alert notification channels usually contain
private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
Example notifier config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels):
```yaml
notifiers:
- name: notification-channel-1
type: slack
uid: notifier1
# either
org_id: 2
# or
org_name: Main Org.
is_default: true
send_reminder: true
frequency: 1h
disable_resolve_message: false
# See `Supported Settings` section for settings supporter for each
# alert notification type.
settings:
recipient: 'XXX'
token: 'xoxb'
uploadImage: true
url: https://slack.com
delete_notifiers:
- name: notification-channel-1
uid: notifier1
org_id: 2
- name: notification-channel-2
# default org_id: 1
```
## How to serve Grafana with a path prefix (/grafana)
In order to serve Grafana with a prefix (e.g., <http://example.com/grafana>), add the following to your values.yaml.
```yaml
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
path: /grafana/?(.*)
hosts:
- k8s.example.dev
grafana.ini:
server:
root_url: http://localhost:3000/grafana # this host can be localhost
```


@ -0,0 +1,54 @@
1. Get your '{{ .Values.adminUser }}' user password by running:
kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local
{{ if .Values.ingress.enabled }}
If you bind Grafana to port 80, please update the values in values.yaml and reinstall:
```
securityContext:
runAsUser: 0
runAsGroup: 0
fsGroup: 0
command:
- "setcap"
- "'cap_net_bind_service=+ep'"
- "/usr/sbin/grafana-server &&"
- "sh"
- "/run.sh"
```
For details, refer to https://grafana.com/docs/installation/configuration/#http-port; otherwise Grafana will crash on startup.
From outside the cluster, the server URL(s) are:
{{- range .Values.ingress.hosts }}
http://{{ . }}
{{- end }}
{{ else }}
Get the Grafana URL to visit by running these commands in the same shell:
{{ if contains "NodePort" .Values.service.type -}}
export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{ else if contains "LoadBalancer" .Values.service.type -}}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc --namespace {{ template "grafana.namespace" . }} -w {{ template "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
http://$SERVICE_IP:{{ .Values.service.port -}}
{{ else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app.kubernetes.io/name={{ template "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000
{{- end }}
{{- end }}
3. Login with the password from step 1 and the username: {{ .Values.adminUser }}
{{- if not .Values.persistence.enabled }}
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Grafana pod is terminated. #####
#################################################################################
{{- end }}


@ -0,0 +1,82 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account
*/}}
{{- define "grafana.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "grafana.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- define "grafana.serviceAccountNameTest" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }}
{{- else -}}
{{ default "default" .Values.serviceAccount.nameTest }}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "grafana.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "grafana.labels" -}}
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "grafana.selectorLabels" -}}
app.kubernetes.io/name: {{ include "grafana.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}


@ -0,0 +1,448 @@
{{- define "grafana.pod" -}}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
serviceAccountName: {{ template "grafana.serviceAccountName" . }}
{{- if .Values.securityContext }}
securityContext:
{{ toYaml .Values.securityContext | indent 2 }}
{{- end }}
{{- if .Values.hostAliases }}
hostAliases:
{{ toYaml .Values.hostAliases | indent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }}
initContainers:
{{- end }}
{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }}
- name: init-chown-data
{{- if .Values.initChownData.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }}
securityContext:
runAsNonRoot: false
runAsUser: 0
command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"]
resources:
{{ toYaml .Values.initChownData.resources | indent 6 }}
volumeMounts:
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- end }}
{{- if .Values.dashboards }}
- name: download-dashboards
{{- if .Values.downloadDashboardsImage.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }}
command: ["/bin/sh"]
args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ]
resources:
{{ toYaml .Values.downloadDashboards.resources | indent 6 }}
env:
{{- range $key, $value := .Values.downloadDashboards.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
volumeMounts:
- name: config
mountPath: "/etc/grafana/download_dashboards.sh"
subPath: download_dashboards.sh
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: {{ template "grafana.name" . }}-sc-datasources
{{- if .Values.sidecar.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: LIST
- name: LABEL
value: "{{ .Values.sidecar.datasources.label }}"
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.datasources.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.datasources.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: {{ template "grafana.name" . }}-sc-notifiers
{{- if .Values.sidecar.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: LIST
- name: LABEL
value: "{{ .Values.sidecar.notifiers.label }}"
- name: FOLDER
value: "/etc/grafana/provisioning/notifiers"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.notifiers.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.notifiers.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-notifiers-volume
mountPath: "/etc/grafana/provisioning/notifiers"
{{- end}}
{{- if .Values.extraInitContainers }}
{{ toYaml .Values.extraInitContainers | indent 2 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
containers:
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard
{{- if .Values.sidecar.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
- name: METHOD
value: {{ .Values.sidecar.dashboards.watchMethod }}
- name: LABEL
value: "{{ .Values.sidecar.dashboards.label }}"
- name: FOLDER
value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}"
- name: RESOURCE
value: "both"
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.dashboards.searchNamespace }}
- name: NAMESPACE
value: "{{ .Values.sidecar.dashboards.searchNamespace }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
volumeMounts:
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{- end}}
- name: {{ .Chart.Name }}
{{- if .Values.image.sha }}
image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}"
{{- else }}
image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.command }}
command:
{{- range .Values.command }}
- {{ . }}
{{- end }}
{{- end}}
volumeMounts:
- name: config
mountPath: "/etc/grafana/grafana.ini"
subPath: grafana.ini
{{- if .Values.ldap.enabled }}
- name: ldap
mountPath: "/etc/grafana/ldap.toml"
subPath: ldap.toml
{{- end }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath | default "" }}
readOnly: {{ .readOnly }}
{{- end }}
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if .Values.dashboards }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
- name: dashboards-{{ $provider }}
mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
subPath: "{{ $key }}.json"
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{- if .Values.dashboardsConfigMaps }}
{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }}
- name: dashboards-{{ . }}
mountPath: "/var/lib/grafana/dashboards/{{ . }}"
{{- end }}
{{- end }}
{{- if .Values.datasources }}
- name: config
mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml"
subPath: datasources.yaml
{{- end }}
{{- if .Values.notifiers }}
- name: config
mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml"
subPath: notifiers.yaml
{{- end }}
{{- if .Values.dashboardProviders }}
- name: config
mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
subPath: dashboardproviders.yaml
{{- end }}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
{{ if .Values.sidecar.dashboards.SCProvider }}
- name: sc-dashboard-provider
mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml"
subPath: provider.yaml
{{- end}}
{{- end}}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources"
{{- end}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: sc-notifiers-volume
mountPath: "/etc/grafana/provisioning/notifiers"
{{- end}}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
subPath: {{ .subPath | default "" }}
{{- end }}
{{- range .Values.extraVolumeMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath | default "" }}
readOnly: {{ .readOnly }}
{{- end }}
{{- range .Values.extraEmptyDirMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
{{- end }}
ports:
- name: {{ .Values.service.portName }}
containerPort: {{ .Values.service.targetPort }}
protocol: TCP
- name: {{ .Values.podPortName }}
containerPort: 3000
protocol: TCP
env:
{{- if not .Values.env.GF_SECURITY_ADMIN_USER }}
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }}
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if .Values.plugins }}
- name: GF_INSTALL_PLUGINS
valueFrom:
configMapKeyRef:
name: {{ template "grafana.fullname" . }}
key: plugins
{{- end }}
{{- if .Values.smtp.existingSecret }}
- name: GF_SMTP_USER
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: {{ .Values.smtp.userKey | default "user" }}
- name: GF_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.smtp.existingSecret }}
key: {{ .Values.smtp.passwordKey | default "password" }}
{{- end }}
{{- range $key, $value := .Values.envValueFrom }}
- name: {{ $key | quote }}
valueFrom:
{{ toYaml $value | indent 10 }}
{{- end }}
{{- range $key, $value := .Values.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- if .Values.envFromSecret }}
envFrom:
- secretRef:
name: {{ tpl .Values.envFromSecret . }}
{{- end }}
{{- if .Values.envRenderSecret }}
envFrom:
- secretRef:
name: {{ template "grafana.fullname" . }}-env
{{- end }}
livenessProbe:
{{ toYaml .Values.livenessProbe | indent 6 }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 6 }}
resources:
{{ toYaml .Values.resources | indent 6 }}
{{- with .Values.extraContainers }}
{{ tpl . $ | indent 2 }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 2 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 2 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 2 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 2 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 2 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "grafana.fullname" . }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- if .Values.dashboards }}
{{- range (keys .Values.dashboards | sortAlpha) }}
- name: dashboards-{{ . }}
configMap:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }}
{{- end }}
{{- end }}
{{- if .Values.dashboardsConfigMaps }}
{{ $root := . }}
{{- range $provider, $name := .Values.dashboardsConfigMaps }}
- name: dashboards-{{ $provider }}
configMap:
name: {{ tpl $name $root }}
{{- end }}
{{- end }}
{{- if .Values.ldap.enabled }}
- name: ldap
secret:
{{- if .Values.ldap.existingSecret }}
secretName: {{ .Values.ldap.existingSecret }}
{{- else }}
secretName: {{ template "grafana.fullname" . }}
{{- end }}
items:
- key: ldap-toml
path: ldap.toml
{{- end }}
{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }}
- name: storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }}
{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }}
# nothing
{{- else }}
- name: storage
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
emptyDir: {}
{{- if .Values.sidecar.dashboards.SCProvider }}
- name: sc-dashboard-provider
configMap:
name: {{ template "grafana.fullname" . }}-config-dashboards
{{- end }}
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: sc-notifiers-volume
emptyDir: {}
{{- end -}}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
defaultMode: {{ .defaultMode }}
{{- end }}
{{- range .Values.extraVolumeMounts }}
- name: {{ .name }}
persistentVolumeClaim:
claimName: {{ .existingClaim }}
{{- end }}
{{- range .Values.extraEmptyDirMounts }}
- name: {{ .name }}
emptyDir: {}
{{- end -}}
{{- if .Values.extraContainerVolumes }}
{{ toYaml .Values.extraContainerVolumes | indent 2 }}
{{- end }}
{{- end }}
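
For context, a hedged values.yaml sketch that exercises the sidecar blocks in this pod template; the repository, tag, and label values are illustrative, not chart defaults:

sidecar:
  image:
    repository: rancher/grafana-sidecar   # illustrative image, not pinned by this chart
    tag: "1.1.0"                          # illustrative tag
  imagePullPolicy: IfNotPresent
  resources: {}
  skipTlsVerify: false
  enableUniqueFilenames: false
  dashboards:
    enabled: true
    label: grafana_dashboard       # ConfigMaps carrying this label are collected
    folder: /tmp/dashboards
    watchMethod: WATCH
  datasources:
    enabled: true
    label: grafana_datasource
  notifiers:
    enabled: false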

View File

@ -0,0 +1,25 @@
{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-clusterrole
{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }}
rules:
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
{{- end}}
{{- with .Values.rbac.extraClusterRoleRules }}
{{ toYaml . | indent 0 }}
{{- end}}
{{- else }}
rules: []
{{- end}}
{{- end}}
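
A minimal values.yaml sketch for the ClusterRole above; the extra rule is an illustrative addition, not a chart default:

rbac:
  create: true
  namespaced: false            # cluster-scoped rules are emitted only when false
  extraClusterRoleRules:
    - apiGroups: [""]          # example rule, appended verbatim to the rules list
      resources: ["namespaces"]
      verbs: ["get", "list"]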

View File

@ -0,0 +1,20 @@
{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "grafana.fullname" . }}-clusterrolebinding
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}
roleRef:
kind: ClusterRole
name: {{ template "grafana.fullname" . }}-clusterrole
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -0,0 +1,25 @@
{{- if .Values.sidecar.dashboards.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-config-dashboards
namespace: {{ template "grafana.namespace" . }}
data:
provider.yaml: |-
apiVersion: 1
providers:
- name: '{{ .Values.sidecar.dashboards.provider.name }}'
orgId: {{ .Values.sidecar.dashboards.provider.orgid }}
folder: '{{ .Values.sidecar.dashboards.provider.folder }}'
type: {{ .Values.sidecar.dashboards.provider.type }}
disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}
allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}
options:
path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}
{{- end}}
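
The provider.yaml rendered above is driven by values along these lines; the provider name and folder are examples:

sidecar:
  dashboards:
    enabled: true
    SCProvider: true           # emit this ConfigMap and mount it as a dashboard provider
    folder: /tmp/dashboards
    provider:
      name: sidecarProvider    # example provider name
      orgid: 1
      folder: ""
      type: file
      disableDelete: false
      allowUiUpdates: false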

View File

@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
data:
{{- if .Values.plugins }}
plugins: {{ join "," .Values.plugins }}
{{- end }}
grafana.ini: |
{{- range $key, $value := index .Values "grafana.ini" }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- if .Values.datasources }}
{{ $root := . }}
{{- range $key, $value := .Values.datasources }}
{{ $key }}: |
{{ tpl (toYaml $value | indent 4) $root }}
{{- end -}}
{{- end -}}
{{- if .Values.notifiers }}
{{- range $key, $value := .Values.notifiers }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{ $key }}: |
{{ toYaml $value | indent 4 }}
{{- end -}}
{{- end -}}
{{- if .Values.dashboards }}
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -skf \
--connect-timeout 60 \
--max-time 60 \
{{- if not $value.b64content }}
-H "Accept: application/json" \
-H "Content-Type: application/json;charset=UTF-8" \
{{ end }}
{{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \
> "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
{{- end -}}
{{- end }}
{{- end }}
{{- end }}
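
To illustrate the rendering above: each top-level key under grafana.ini becomes an INI [section], and every dashboards entry with a gnetId or url becomes one curl call in download_dashboards.sh. A sketch with example values:

grafana.ini:
  server:
    domain: grafana.example.com    # example; renders as "domain = grafana.example.com" under [server]
  analytics:
    check_for_updates: false
dashboards:
  default:
    node-exporter:
      gnetId: 1860                 # example grafana.com dashboard ID
      revision: 21
      datasource: Prometheus       # substituted via the sed expression above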

View File

@ -0,0 +1,35 @@
{{- if .Values.dashboards }}
{{ $files := .Files }}
{{- range $provider, $dashboards := .Values.dashboards }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }}
namespace: {{ template "grafana.namespace" $ }}
labels:
{{- include "grafana.labels" $ | nindent 4 }}
dashboard-provider: {{ $provider }}
{{- if $dashboards }}
data:
{{- $dashboardFound := false }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
{{- $dashboardFound = true }}
{{ print $key | indent 2 }}.json:
{{- if hasKey $value "json" }}
|-
{{ $value.json | indent 6 }}
{{- end }}
{{- if hasKey $value "file" }}
{{ toYaml ( $files.Get $value.file ) | indent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- if not $dashboardFound }}
{}
{{- end }}
{{- end }}
---
{{- end }}
{{- end }}
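
A hedged sketch of the two dashboard sources this ConfigMap accepts; key names and the file path are examples:

dashboards:
  default:
    inline-example:
      json: |
        {"title": "Inline example", "panels": []}
    file-example:
      file: dashboards/example.json   # read with .Files.Get, so it must ship inside the chart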

View File

@ -0,0 +1,47 @@
{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.labels }}
{{ toYaml .Values.labels | indent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- with .Values.deploymentStrategy }}
strategy:
{{ toYaml . | trim | indent 4 }}
{{- end }}
template:
metadata:
labels:
{{- include "grafana.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }}
{{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.envRenderSecret }}
checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }}
{{- end }}
{{- with .Values.podAnnotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- include "grafana.pod" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "grafana.fullname" . }}-headless
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
clusterIP: None
selector:
{{- include "grafana.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}

View File

@ -0,0 +1,55 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "grafana.fullname" . -}}
{{- $servicePort := .Values.service.port -}}
{{- $ingressPath := .Values.ingress.path -}}
{{- $extraPaths := .Values.ingress.extraPaths -}}
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
apiVersion: networking.k8s.io/v1beta1
{{ else }}
apiVersion: extensions/v1beta1
{{ end -}}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.ingress.labels }}
{{ toYaml .Values.ingress.labels | indent 4 }}
{{- end }}
{{- if .Values.ingress.annotations }}
annotations:
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ tpl $value $ | quote }}
{{- end }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
{{- end }}
rules:
{{- if .Values.ingress.hosts }}
{{- range .Values.ingress.hosts }}
- host: {{ . }}
http:
paths:
{{ if $extraPaths }}
{{ toYaml $extraPaths | indent 10 }}
{{- end }}
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- end }}
{{- else }}
- http:
paths:
- backend:
serviceName: {{ $fullName }}
servicePort: {{ $servicePort }}
{{- if $ingressPath }}
path: {{ $ingressPath }}
{{- end }}
{{- end -}}
{{- end }}
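
An illustrative values.yaml excerpt for the Ingress above; host, annotation, and secret names are placeholders:

ingress:
  enabled: true
  path: /
  hosts:
    - grafana.example.com
  annotations:
    kubernetes.io/ingress.class: nginx   # example annotation, templated through tpl
  tls:
    - hosts:
        - grafana.example.com
      secretName: grafana-tls            # example TLS secret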

View File

@ -0,0 +1,75 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-nginx-proxy-config
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
data:
nginx.conf: |-
worker_processes auto;
error_log /dev/stdout warn;
pid /var/cache/nginx/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)';
proxy_connect_timeout 10;
proxy_read_timeout 180;
proxy_send_timeout 5;
proxy_buffering off;
proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g;
server {
listen 8080;
access_log off;
gzip on;
gzip_min_length 1k;
gzip_comp_level 2;
gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png;
gzip_vary on;
gzip_disable "MSIE [1-6]\.";
proxy_set_header Host $host;
location /api/dashboards {
proxy_pass http://localhost:3000;
}
location /api/search {
proxy_pass http://localhost:3000;
sub_filter_types application/json;
sub_filter_once off;
sub_filter '"url":"/d' '"url":"d';
}
location / {
proxy_cache my_zone;
proxy_cache_valid 200 302 1d;
proxy_cache_valid 301 30d;
proxy_cache_valid any 5m;
proxy_cache_bypass $http_cache_control;
add_header X-Proxy-Cache $upstream_cache_status;
add_header Cache-Control "public";
proxy_pass http://localhost:3000/;
sub_filter_types text/html;
sub_filter_once off;
sub_filter '"appSubUrl":""' '"appSubUrl":"."';
sub_filter '"url":"/' '"url":"./';
sub_filter ':"/avatar/' ':"avatar/';
if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) {
expires 90d;
}
}
}
}

View File

@ -0,0 +1,22 @@
{{- if .Values.podDisruptionBudget }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.labels }}
{{ toYaml .Values.labels | indent 4 }}
{{- end }}
spec:
{{- if .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- end }}
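
A minimal sketch for the PodDisruptionBudget above; the template renders whichever fields are set, and Kubernetes rejects a PDB that sets both:

podDisruptionBudget:
  minAvailable: 1    # use either minAvailable or maxUnavailable, not both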

View File

@ -0,0 +1,48 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.rbac.pspAnnotations }}
annotations: {{ toYaml .Values.rbac.pspAnnotations | nindent 4 }}
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
# Default set from Docker, without DAC_OVERRIDE or CHOWN
- FOWNER
- FSETID
- KILL
- SETGID
- SETUID
- SETPCAP
- NET_BIND_SERVICE
- NET_RAW
- SYS_CHROOT
- MKNOD
- AUDIT_WRITE
- SETFCAP
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: false
{{- end }}

View File

@ -0,0 +1,28 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.persistence.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.persistence.finalizers }}
finalizers:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClassName }}
storageClassName: {{ .Values.persistence.storageClassName }}
{{- end -}}
{{- end -}}
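
An illustrative persistence block for the PVC above; the storage class is an example:

persistence:
  enabled: true
  type: pvc
  size: 10Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn   # example; omit to use the cluster default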

View File

@ -0,0 +1,32 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }}
rules:
{{- if .Values.rbac.pspEnabled }}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ template "grafana.fullname" . }}]
{{- end }}
{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
{{- end }}
{{- with .Values.rbac.extraRoleRules }}
{{ toYaml . | indent 0 }}
{{- end}}
{{- else }}
rules: []
{{- end }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "grafana.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}
{{- end -}}

View File

@ -0,0 +1,14 @@
{{- if .Values.envRenderSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "grafana.fullname" . }}-env
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $key, $val := .Values.envRenderSecret }}
{{ $key }}: {{ $val | b64enc | quote }}
{{- end -}}
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
type: Opaque
data:
{{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}
admin-user: {{ .Values.adminUser | b64enc | quote }}
{{- if .Values.adminPassword }}
admin-password: {{ .Values.adminPassword | b64enc | quote }}
{{- else }}
admin-password: {{ randAlphaNum 40 | b64enc | quote }}
{{- end }}
{{- end }}
{{- if not .Values.ldap.existingSecret }}
ldap-toml: {{ .Values.ldap.config | b64enc | quote }}
{{- end }}
{{- end }}
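
To supply credentials instead of letting this template generate a random 40-character admin password, a sketch along these lines (the secret name is an example):

admin:
  existingSecret: grafana-admin-credentials   # example pre-created Secret
  userKey: admin-user          # key defaults also used by the pod template above
  passwordKey: admin-password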

View File

@ -0,0 +1,50 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
  {{ end }}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
ports:
- name: {{ .Values.service.portName }}
port: {{ .Values.service.port }}
protocol: TCP
targetPort: {{ .Values.service.targetPort }}
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{ .Values.service.nodePort }}
{{ end }}
{{- if .Values.extraExposePorts }}
{{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }}
{{- end }}
selector:
{{- include "grafana.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}
{{- end }}

View File

@ -0,0 +1,36 @@
{{- if .Values.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "grafana.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ .Values.serviceMonitor.namespace }}
{{- end }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: {{ .Values.serviceMonitor.interval }}
{{- if .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}
honorLabels: true
port: {{ .Values.service.portName }}
path: {{ .Values.serviceMonitor.path }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }}
{{- end }}
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
app: {{ template "grafana.name" . }}
release: "{{ .Release.Name }}"
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}
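
A hedged values sketch for the ServiceMonitor above; the label is an example meant to match a Prometheus operator selector:

serviceMonitor:
  enabled: true
  interval: 30s
  scrapeTimeout: 10s   # should be shorter than interval
  path: /metrics
  labels:
    release: rancher-monitoring   # example; align with your serviceMonitorSelector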

View File

@ -0,0 +1,47 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
serviceName: {{ template "grafana.fullname" . }}-headless
template:
metadata:
labels:
{{- include "grafana.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }}
{{- if not .Values.admin.existingSecret }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
{{- with .Values.podAnnotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- include "grafana.pod" . | nindent 6 }}
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes: {{ .Values.persistence.accessModes }}
storageClassName: {{ .Values.persistence.storageClassName }}
resources:
requests:
storage: {{ .Values.persistence.size }}
{{- end }}
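
Selecting the StatefulSet above instead of the Deployment comes down to the persistence type; a sketch:

persistence:
  enabled: true
  type: statefulset    # with "pvc" (or persistence disabled) the Deployment is rendered instead
  size: 10Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn   # example storage class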

View File

@ -0,0 +1,17 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}-test
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
data:
run.sh: |-
@test "Test Health" {
url="http://{{ template "grafana.fullname" . }}/api/health"
code=$(wget --server-response --spider --timeout 10 --tries 1 ${url} 2>&1 | awk '/^ HTTP/{print $2}')
[ "$code" == "200" ]
}
{{- end }}

View File

@ -0,0 +1,29 @@
{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "grafana.fullname" . }}-test
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
spec:
allowPrivilegeEscalation: true
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
fsGroup:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
runAsUser:
rule: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- projected
- secret
{{- end }}

View File

@ -0,0 +1,14 @@
{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "grafana.fullname" . }}-test
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ template "grafana.fullname" . }}-test]
{{- end }}

View File

@ -0,0 +1,17 @@
{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "grafana.fullname" . }}-test
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "grafana.fullname" . }}-test
subjects:
- kind: ServiceAccount
name: {{ template "grafana.serviceAccountNameTest" . }}
namespace: {{ template "grafana.namespace" . }}
{{- end }}

View File

@ -0,0 +1,9 @@
{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
name: {{ template "grafana.serviceAccountNameTest" . }}
namespace: {{ template "grafana.namespace" . }}
{{- end }}

View File

@ -0,0 +1,48 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: {{ template "grafana.fullname" . }}-test
labels:
{{- include "grafana.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
namespace: {{ template "grafana.namespace" . }}
spec:
serviceAccountName: {{ template "grafana.serviceAccountNameTest" . }}
{{- if .Values.testFramework.securityContext }}
securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }}
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 4 }}
{{- end }}
containers:
- name: {{ .Release.Name }}-test
image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}"
imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}"
command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"]
volumeMounts:
- mountPath: /tests
name: tests
readOnly: true
volumes:
- name: tests
configMap:
name: {{ template "grafana.fullname" . }}-test
restartPolicy: Never
{{- end }}
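
A sketch of the values consumed by the test resources above; the image coordinates are illustrative:

testFramework:
  enabled: true
  image: rancher/bats    # illustrative repository for the bats test runner
  tag: v1.1.0            # illustrative tag
  imagePullPolicy: IfNotPresent
  securityContext: {}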

Some files were not shown because too many files have changed in this diff