Make charts - NewRelic

pull/417/head
shawnj 2022-05-13 06:33:21 -07:00
parent 7ad9914c82
commit e7abe463f8
371 changed files with 23737 additions and 0 deletions

Binary file not shown.

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,33 @@
dependencies:
- name: newrelic-infrastructure
repository: https://newrelic.github.io/nri-kubernetes
version: 3.3.3
- name: nri-prometheus
repository: https://newrelic.github.io/nri-prometheus
version: 2.1.1
- name: nri-metadata-injection
repository: https://newrelic.github.io/k8s-metadata-injection
version: 3.0.1
- name: newrelic-k8s-metrics-adapter
repository: https://newrelic.github.io/newrelic-k8s-metrics-adapter
version: 0.7.4
- name: kube-state-metrics
repository: https://kubernetes.github.io/kube-state-metrics
version: 2.13.2
- name: nri-kube-events
repository: https://newrelic.github.io/nri-kube-events
version: 2.2.2
- name: newrelic-logging
repository: https://newrelic.github.io/helm-charts
version: 1.10.9
- name: newrelic-pixie
repository: https://newrelic.github.io/helm-charts
version: 1.5.1
- name: pixie-operator-chart
repository: https://pixie-operator-charts.storage.googleapis.com
version: 0.0.26
- name: newrelic-infra-operator
repository: https://newrelic.github.io/newrelic-infra-operator
version: 0.6.0
digest: sha256:991e5176a2233cedb6eb1fbf49cd38ce52d2123a2fadf5a352e96cc42a672e8e
generated: "2022-05-04T13:51:43.981149+02:00"

View File

@ -0,0 +1,77 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: New Relic
catalog.cattle.io/release-name: newrelic-bundle
apiVersion: v2
dependencies:
- condition: ksm.enabled
name: kube-state-metrics
repository: file://./charts/kube-state-metrics
- condition: newrelic-infra-operator.enabled
name: newrelic-infra-operator
repository: file://./charts/newrelic-infra-operator
- condition: infrastructure.enabled
name: newrelic-infrastructure
repository: file://./charts/newrelic-infrastructure
- condition: metrics-adapter.enabled
name: newrelic-k8s-metrics-adapter
repository: file://./charts/newrelic-k8s-metrics-adapter
- condition: logging.enabled
name: newrelic-logging
repository: file://./charts/newrelic-logging
- condition: newrelic-pixie.enabled
name: newrelic-pixie
repository: file://./charts/newrelic-pixie
- condition: kubeEvents.enabled
name: nri-kube-events
repository: file://./charts/nri-kube-events
- condition: webhook.enabled
name: nri-metadata-injection
repository: file://./charts/nri-metadata-injection
- condition: prometheus.enabled
name: nri-prometheus
repository: file://./charts/nri-prometheus
- alias: pixie-chart
condition: pixie-chart.enabled
name: pixie-operator-chart
repository: file://./charts/pixie-operator-chart
version: 0.0.26
description: A chart groups together the individual charts for the New Relic Kubernetes
solution for more comfortable deployment.
home: https://github.com/newrelic/helm-charts
icon: https://newrelic.com/themes/custom/curio/assets/mediakit/new_relic_logo_vertical.png
keywords:
- infrastructure
- newrelic
- monitoring
kubeVersion: 1.16-0 - 1.22-0
maintainers:
- name: alvarocabanas
url: https://github.com/alvarocabanas
- name: carlossscastro
url: https://github.com/carlossscastro
- name: sigilioso
url: https://github.com/sigilioso
- name: gsanchezgavier
url: https://github.com/gsanchezgavier
- name: kang-makes
url: https://github.com/kang-makes
- name: marcsanmi
url: https://github.com/marcsanmi
- name: paologallinaharbur
url: https://github.com/paologallinaharbur
- name: roobre
url: https://github.com/roobre
name: nri-bundle
sources:
- https://github.com/newrelic/nri-bundle/
- https://github.com/newrelic/nri-bundle/tree/master/charts/nri-bundle
- https://github.com/newrelic/nri-kubernetes/tree/master/charts/newrelic-infrastructure
- https://github.com/newrelic/nri-prometheus/tree/master/charts/nri-prometheus
- https://github.com/newrelic/k8s-metadata-injection/tree/master/charts/nri-metadata-injection
- https://github.com/newrelic/newrelic-k8s-metrics-adapter/tree/master/charts/newrelic-k8s-metrics-adapter
- https://github.com/newrelic/nri-kube-events/tree/master/charts/nri-kube-events
- https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-logging
- https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-pixie
- https://github.com/newrelic/newrelic-infra-operator/tree/master/charts/newrelic-infra-operator
version: 4.3.200

View File

@ -0,0 +1,138 @@
# nri-bundle
![Version: 4.3.2](https://img.shields.io/badge/Version-4.3.2-informational?style=flat-square)
A chart groups together the individual charts for the New Relic Kubernetes solution for more comfortable deployment.
**Homepage:** <https://github.com/newrelic/helm-charts>
## Configure components
It is possible to configure settings for the individual charts this chart groups by specifying values for them under a key using the name of the chart,
as specified in [helm documentation](https://helm.sh/docs/chart_template_guide/subcharts_and_globals).
For example, by adding the following to the `values.yml` file:
```yaml
# Configuration settings for the newrelic-infrastructure chart
newrelic-infrastructure:
# Any key defined in the values.yml file for the newrelic-infrastructure chart can be configured here:
# https://github.com/newrelic/nri-kubernetes/blob/master/charts/newrelic-infrastructure/values.yaml
verboseLog: false
resources:
limits:
memory: 512M
```
It is possible to override any entry of the [`newrelic-infrastructure`](https://github.com/newrelic/nri-kubernetes/tree/master/charts/newrelic-infrastructure)
chart, as defined in their [`values.yml` file](https://github.com/newrelic/nri-kubernetes/blob/master/charts/newrelic-infrastructure/values.yaml).
The same approach can be followed to update any of the subcharts.
After making these changes to the `values.yml` file, or a custom values file, make sure to apply them using:
```
$ helm upgrade --reuse-values -f values.yaml [RELEASE] newrelic/nri-bundle
```
Where `[RELEASE]` is the name of the helm release, e.g. `newrelic-bundle`.
## Monitor on host integrations
If you wish to monitor services running on Kubernetes you can provide integrations
configuration under `integrations_config`; it will be passed down to the `newrelic-infrastructure` chart.
You just need to create a new entry where the "name" is the filename of the configuration file and the data is the content of
the integration configuration. The name must end in ".yaml" as this will be the
filename generated and the Infrastructure agent only looks for YAML files.
The data part is the actual integration configuration as described in the spec here:
https://docs.newrelic.com/docs/integrations/integrations-sdk/file-specifications/integration-configuration-file-specifications-agent-v180
In the following example you can see how to monitor a Redis integration with autodiscovery
```yaml
newrelic-infrastructure:
nri-redis-sampleapp:
discovery:
command:
exec: /var/db/newrelic-infra/nri-discovery-kubernetes --tls --port 10250
match:
label.app: sampleapp
integrations:
- name: nri-redis
env:
# using the discovered IP as the hostname address
HOSTNAME: ${discovery.ip}
PORT: 6379
labels:
env: test
```
## Values managed globally
Some of the subcharts implement the [New Relic's common Helm library](https://github.com/newrelic/helm-charts/tree/master/library/common-library) which
means that they honor a wide range of defaults and globals common to most New Relic Helm charts.
Options that can be defined globally include `affinity`, `nodeSelector`, `tolerations`, `proxy` and others. The full list can be found at
[user's guide of the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md).
At the time of writing this document, all the charts from `nri-bundle` except `newrelic-logging` and `synthetics-minion` implement this library and
honor global options as described below.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| global | object | See [`values.yaml`](values.yaml) | change the behaviour globally to all the supported helm charts. See [user's guide of the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md) for further information. |
| global.affinity | object | `{}` | Sets pod/node affinities |
| global.cluster | string | `""` | The cluster name for the Kubernetes cluster. |
| global.containerSecurityContext | object | `{}` | Sets security context (at container level) |
| global.customAttributes | object | `{}` | Adds extra attributes to the cluster and all the metrics emitted to the backend |
| global.customSecretInsightsKey | string | `""` | Key in the Secret object where the insights key is stored |
| global.customSecretLicenseKey | string | `""` | Key in the Secret object where the license key is stored |
| global.customSecretName | string | `""` | Name of the Secret object where the license key is stored |
| global.dnsConfig | object | `{}` | Sets pod's dnsConfig |
| global.fargate | bool | false | Must be set to `true` when deploying in an EKS Fargate environment |
| global.hostNetwork | bool | false | Sets pod's hostNetwork |
| global.images.pullSecrets | list | `[]` | Set secrets to be able to fetch images |
| global.images.registry | string | `""` | Changes the registry where to get the images. Useful when there is an internal image cache/proxy |
| global.insightsKey | string | `""` | The license key for your New Relic Account. This will be preferred configuration option if both `insightsKey` and `customSecret` are specified. |
| global.labels | object | `{}` | Additional labels for chart objects |
| global.licenseKey | string | `""` | The license key for your New Relic Account. This will be preferred configuration option if both `licenseKey` and `customSecret` are specified. |
| global.lowDataMode | bool | false | Reduces number of metrics sent in order to reduce costs |
| global.nodeSelector | object | `{}` | Sets pod's node selector |
| global.nrStaging | bool | false | Send the metrics to the staging backend. Requires a valid staging license key |
| global.podLabels | object | `{}` | Additional labels for chart pods |
| global.podSecurityContext | object | `{}` | Sets security context (at pod level) |
| global.priorityClassName | string | `""` | Sets pod's priorityClassName |
| global.privileged | bool | false | In each integration it has different behavior. See [Further information](#values-managed-globally-3) but all aim to send fewer metrics to the backend to try to save costs |
| global.proxy | string | `""` | Configures the integration to send all HTTP/HTTPS request through the proxy in that URL. The URL should have a standard format like `https://user:password@hostname:port` |
| global.serviceAccount.annotations | object | `{}` | Add these annotations to the service account we create |
| global.serviceAccount.create | string | `nil` | Configures if the service account should be created or not |
| global.serviceAccount.name | string | `nil` | Change the name of the service account. This is honored if you disable on this chart the creation of the service account so you can use your own |
| global.tolerations | list | `[]` | Sets pod's tolerations to node taints |
| global.verboseLog | bool | false | Sets the debug logs to this integration or all integrations if it is set globally |
| infrastructure.enabled | bool | `true` | Install the [`newrelic-infrastructure` chart](https://github.com/newrelic/nri-kubernetes/tree/main/charts/newrelic-infrastructure) |
| ksm.enabled | bool | `false` | Install the [`kube-state-metrics` chart from the stable helm charts repository](https://github.com/kubernetes/kube-state-metrics/tree/master/charts/kube-state-metrics) This is mandatory if `infrastructure.enabled` is set to `true` and the user does not provide its own instance of KSM version >=1.8 and <=2.0 |
| kubeEvents.enabled | bool | `false` | Install the [`nri-kube-events` chart](https://github.com/newrelic/nri-kube-events/tree/main/charts/nri-kube-events) |
| logging.enabled | bool | `false` | Install the [`newrelic-logging` chart](https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-logging) |
| metrics-adapter.enabled | bool | `false` | Install the [`newrelic-k8s-metrics-adapter.` chart](https://github.com/newrelic/newrelic-k8s-metrics-adapter/tree/main/charts/newrelic-k8s-metrics-adapter) (Beta) |
| newrelic-infra-operator.enabled | bool | `false` | Install the [`newrelic-infra-operator` chart](https://github.com/newrelic/newrelic-infra-operator/tree/main/charts/newrelic-infra-operator) (Beta) |
| newrelic-pixie.enabled | bool | `false` | Install the [`newrelic-pixie`](https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-pixie) |
| pixie-chart.enabled | bool | `false` | Install the [`pixie-chart` chart](https://docs.pixielabs.ai/installing-pixie/install-schemes/helm/#3.-deploy) |
| prometheus.enabled | bool | `false` | Install the [`nri-prometheus` chart](https://github.com/newrelic/nri-prometheus/tree/main/charts/nri-prometheus) |
| webhook.enabled | bool | `true` | Install the [`nri-metadata-injection` chart](https://github.com/newrelic/k8s-metadata-injection/tree/main/charts/nri-metadata-injection) |
## Maintainers
* [alvarocabanas](https://github.com/alvarocabanas)
* [carlossscastro](https://github.com/carlossscastro)
* [sigilioso](https://github.com/sigilioso)
* [gsanchezgavier](https://github.com/gsanchezgavier)
* [kang-makes](https://github.com/kang-makes)
* [marcsanmi](https://github.com/marcsanmi)
* [paologallinaharbur](https://github.com/paologallinaharbur)
* [roobre](https://github.com/roobre)

View File

@ -0,0 +1,100 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.badgesSection" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
## Configure components
It is possible to configure settings for the individual charts this chart groups by specifying values for them under a key using the name of the chart,
as specified in [helm documentation](https://helm.sh/docs/chart_template_guide/subcharts_and_globals).
For example, by adding the following to the `values.yml` file:
```yaml
# Configuration settings for the newrelic-infrastructure chart
newrelic-infrastructure:
# Any key defined in the values.yml file for the newrelic-infrastructure chart can be configured here:
# https://github.com/newrelic/nri-kubernetes/blob/master/charts/newrelic-infrastructure/values.yaml
verboseLog: false
resources:
limits:
memory: 512M
```
It is possible to override any entry of the [`newrelic-infrastructure`](https://github.com/newrelic/nri-kubernetes/tree/master/charts/newrelic-infrastructure)
chart, as defined in their [`values.yml` file](https://github.com/newrelic/nri-kubernetes/blob/master/charts/newrelic-infrastructure/values.yaml).
The same approach can be followed to update any of the subcharts.
After making these changes to the `values.yml` file, or a custom values file, make sure to apply them using:
```
$ helm upgrade --reuse-values -f values.yaml [RELEASE] newrelic/nri-bundle
```
Where `[RELEASE]` is the name of the helm release, e.g. `newrelic-bundle`.
## Monitor on host integrations
If you wish to monitor services running on Kubernetes you can provide integrations
configuration under `integrations_config`; it will be passed down to the `newrelic-infrastructure` chart.
You just need to create a new entry where the "name" is the filename of the configuration file and the data is the content of
the integration configuration. The name must end in ".yaml" as this will be the
filename generated and the Infrastructure agent only looks for YAML files.
The data part is the actual integration configuration as described in the spec here:
https://docs.newrelic.com/docs/integrations/integrations-sdk/file-specifications/integration-configuration-file-specifications-agent-v180
In the following example you can see how to monitor a Redis integration with autodiscovery
```yaml
newrelic-infrastructure:
nri-redis-sampleapp:
discovery:
command:
exec: /var/db/newrelic-infra/nri-discovery-kubernetes --tls --port 10250
match:
label.app: sampleapp
integrations:
- name: nri-redis
env:
# using the discovered IP as the hostname address
HOSTNAME: ${discovery.ip}
PORT: 6379
labels:
env: test
```
## Values managed globally
Some of the subcharts implement the [New Relic's common Helm library](https://github.com/newrelic/helm-charts/tree/master/library/common-library) which
means that they honor a wide range of defaults and globals common to most New Relic Helm charts.
Options that can be defined globally include `affinity`, `nodeSelector`, `tolerations`, `proxy` and others. The full list can be found at
[user's guide of the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md).
At the time of writing this document, all the charts from `nri-bundle` except `newrelic-logging` and `synthetics-minion` implement this library and
honor global options as described below.
{{ template "chart.valuesSection" . }}
{{ if .Maintainers }}
## Maintainers
{{ range .Maintainers }}
{{- if .Name }}
{{- if .Url }}
* [{{ .Name }}]({{ .Url }})
{{- else }}
* {{ .Name }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,5 @@
# New Relic Kubernetes Integration
New Relic's Kubernetes integration gives you full observability into the health and performance of your environment, no matter whether you run Kubernetes on-premises or in the cloud. With our [cluster explorer](https://docs.newrelic.com/docs/integrations/kubernetes-integration/cluster-explorer/kubernetes-cluster-explorer), you can cut through layers of complexity to see how your cluster is performing, from the heights of the control plane down to applications running on a single pod.
You can see the power of the Kubernetes integration in the [cluster explorer](https://docs.newrelic.com/docs/integrations/kubernetes-integration/cluster-explorer/kubernetes-cluster-explorer), where the full picture of a cluster is made available on a single screen: nodes and pods are visualized according to their health and performance, with pending and alerting nodes in the innermost circles. [Predefined alert conditions](https://docs.newrelic.com/docs/integrations/kubernetes-integration/kubernetes-events/kubernetes-integration-predefined-alert-policy) help you troubleshoot issues right from the start. Clicking each node reveals its status and how each app is performing.

View File

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@ -0,0 +1,18 @@
apiVersion: v1
appVersion: 1.9.8
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
- metric
- monitoring
- prometheus
- kubernetes
maintainers:
- email: tariq.ibrahim@mulesoft.com
name: tariq1890
- email: manuel@rueg.eu
name: mrueg
name: kube-state-metrics
sources:
- https://github.com/kubernetes/kube-state-metrics/
version: 2.13.2

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,6 @@
approvers:
- tariq1890
- mrueg
reviewers:
- tariq1890
- mrueg

View File

@ -0,0 +1,66 @@
# kube-state-metrics Helm Chart
Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics).
## Get Repo Info
```console
helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Install Chart
```console
# Helm 3
$ helm install [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags]
# Helm 2
$ helm install --name [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags]
```
_See [configuration](#configuration) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Uninstall Chart
```console
# Helm 3
$ helm uninstall [RELEASE_NAME]
# Helm 2
# helm delete --purge [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
## Upgrading Chart
```console
# Helm 3 or 2
$ helm upgrade [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags]
```
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### From stable/kube-state-metrics
You can upgrade in-place:
1. [get repo info](#get-repo-info)
1. [upgrade](#upgrading-chart) your existing release name using the new chart repo
## Configuration
See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
```console
helm show values kube-state-metrics/kube-state-metrics
```
You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options.

View File

@ -0,0 +1,10 @@
kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
The exposed metrics can be found here:
https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics
The metrics are exported on the HTTP endpoint /metrics on the listening port.
In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics
They are served either as plaintext or protobuf depending on the Accept header.
They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.

View File

@ -0,0 +1,47 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
Honors .Values.nameOverride; truncated to 63 characters because Kubernetes
object names are limited by the DNS label spec.
*/}}
{{- define "kube-state-metrics.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kube-state-metrics.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to use.
When serviceAccount.create is true, falls back to the chart fullname unless
serviceAccount.name is set; otherwise falls back to "default".
*/}}
{{- define "kube-state-metrics.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "kube-state-metrics.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,23 @@
{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}}
# Binds the chart-managed ClusterRole (or a pre-existing role named by
# rbac.useExistingRole) to the kube-state-metrics ServiceAccount.
# Rendered only when RBAC creation is enabled AND cluster-wide scope is used.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ template "kube-state-metrics.fullname" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  {{- if .Values.rbac.useExistingRole }}
  # Operator-supplied role: the chart skips role creation but still binds to it.
  name: {{ .Values.rbac.useExistingRole }}
  {{- else }}
  name: {{ template "kube-state-metrics.fullname" . }}
  {{- end }}
subjects:
- kind: ServiceAccount
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
{{- end -}}

View File

@ -0,0 +1,216 @@
apiVersion: apps/v1
{{- if .Values.autosharding.enabled }}
# Autosharding relies on stable pod identities (ordinals), hence a StatefulSet.
kind: StatefulSet
{{- else }}
kind: Deployment
{{- end }}
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app.kubernetes.io/instance: "{{ .Release.Name }}"
    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
    app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
{{- end }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
  replicas: {{ .Values.replicas }}
{{- if .Values.autosharding.enabled }}
  # StatefulSet-only fields; no persistent storage is needed.
  serviceName: {{ template "kube-state-metrics.fullname" . }}
  volumeClaimTemplates: []
{{- end }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
        app.kubernetes.io/instance: "{{ .Release.Name }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 8 }}
{{- end }}
{{- if .Values.podAnnotations }}
      annotations:
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
    spec:
      hostNetwork: {{ .Values.hostNetwork }}
      serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }}
      {{- if .Values.securityContext.enabled }}
      securityContext:
        fsGroup: {{ .Values.securityContext.fsGroup }}
        runAsGroup: {{ .Values.securityContext.runAsGroup }}
        runAsUser: {{ .Values.securityContext.runAsUser }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      containers:
      - name: {{ .Chart.Name }}
        {{- if .Values.autosharding.enabled }}
        # Sharding needs the pod's own identity (see --pod / --pod-namespace below).
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        {{- end }}
        # One --collectors flag is emitted per enabled collector.
        # NOTE(review): assumes the pinned image's CLI accepts the flag
        # repeatedly and unions the values — confirm against image.tag.
        args:
{{ if .Values.extraArgs }}
        {{- range .Values.extraArgs }}
        - {{ . }}
        {{- end }}
{{ end }}
{{ if .Values.collectors.certificatesigningrequests }}
        - --collectors=certificatesigningrequests
{{ end }}
{{ if .Values.collectors.configmaps }}
        - --collectors=configmaps
{{ end }}
{{ if .Values.collectors.cronjobs }}
        - --collectors=cronjobs
{{ end }}
{{ if .Values.collectors.daemonsets }}
        - --collectors=daemonsets
{{ end }}
{{ if .Values.collectors.deployments }}
        - --collectors=deployments
{{ end }}
{{ if .Values.collectors.endpoints }}
        - --collectors=endpoints
{{ end }}
{{ if .Values.collectors.horizontalpodautoscalers }}
        - --collectors=horizontalpodautoscalers
{{ end }}
{{ if .Values.collectors.ingresses }}
        - --collectors=ingresses
{{ end }}
{{ if .Values.collectors.jobs }}
        - --collectors=jobs
{{ end }}
{{ if .Values.collectors.limitranges }}
        - --collectors=limitranges
{{ end }}
{{ if .Values.collectors.mutatingwebhookconfigurations }}
        - --collectors=mutatingwebhookconfigurations
{{ end }}
{{ if .Values.collectors.namespaces }}
        - --collectors=namespaces
{{ end }}
{{ if .Values.collectors.networkpolicies }}
        - --collectors=networkpolicies
{{ end }}
{{ if .Values.collectors.nodes }}
        - --collectors=nodes
{{ end }}
{{ if .Values.collectors.persistentvolumeclaims }}
        - --collectors=persistentvolumeclaims
{{ end }}
{{ if .Values.collectors.persistentvolumes }}
        - --collectors=persistentvolumes
{{ end }}
{{ if .Values.collectors.poddisruptionbudgets }}
        - --collectors=poddisruptionbudgets
{{ end }}
{{ if .Values.collectors.pods }}
        - --collectors=pods
{{ end }}
{{ if .Values.collectors.replicasets }}
        - --collectors=replicasets
{{ end }}
{{ if .Values.collectors.replicationcontrollers }}
        - --collectors=replicationcontrollers
{{ end }}
{{ if .Values.collectors.resourcequotas }}
        - --collectors=resourcequotas
{{ end }}
{{ if .Values.collectors.secrets }}
        - --collectors=secrets
{{ end }}
{{ if .Values.collectors.services }}
        - --collectors=services
{{ end }}
{{ if .Values.collectors.statefulsets }}
        - --collectors=statefulsets
{{ end }}
{{ if .Values.collectors.storageclasses }}
        - --collectors=storageclasses
{{ end }}
{{ if .Values.collectors.validatingwebhookconfigurations }}
        - --collectors=validatingwebhookconfigurations
{{ end }}
{{ if .Values.collectors.verticalpodautoscalers }}
        - --collectors=verticalpodautoscalers
{{ end }}
{{ if .Values.collectors.volumeattachments }}
        - --collectors=volumeattachments
{{ end }}
{{ if .Values.namespace }}
        - --namespace={{ .Values.namespace | join "," }}
{{ end }}
{{ if .Values.autosharding.enabled }}
        - --pod=$(POD_NAME)
        - --pod-namespace=$(POD_NAMESPACE)
{{ end }}
{{ if .Values.kubeconfig.enabled }}
        - --kubeconfig=/opt/k8s/.kube/config
{{ end }}
{{ if .Values.selfMonitor.telemetryHost }}
        - --telemetry-host={{ .Values.selfMonitor.telemetryHost }}
{{ end }}
        # NOTE(review): the container telemetry port is fixed at 8081 here,
        # while values.yaml suggests selfMonitor.telemetryPort is tunable —
        # that value only changes the Service port, not this flag. Confirm
        # before exposing telemetryPort as fully configurable.
        - --telemetry-port=8081
{{- if .Values.kubeconfig.enabled }}
        volumeMounts:
        - name: kubeconfig
          mountPath: /opt/k8s/.kube/
          readOnly: true
{{- end }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        ports:
        - containerPort: 8080
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 5
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 5
          timeoutSeconds: 5
{{- if .Values.resources }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
{{- end }}
{{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.kubeconfig.enabled}}
      volumes:
      - name: kubeconfig
        secret:
          secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig
{{- end }}

View File

@ -0,0 +1,15 @@
{{- if .Values.kubeconfig.enabled -}}
# Holds the operator-supplied kubeconfig that the container mounts at
# /opt/k8s/.kube/ when kubeconfig.enabled is true.
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app.kubernetes.io/instance: "{{ .Release.Name }}"
    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
type: Opaque
data:
  # The value is inserted verbatim under `data:`, so kubeconfig.secret must
  # already be base64-encoded (values.yaml documents it as such).
  config: '{{ .Values.kubeconfig.secret }}'
{{- end -}}

View File

@ -0,0 +1,20 @@
{{- if .Values.podDisruptionBudget -}}
{{/*
PodDisruptionBudget for kube-state-metrics. The spec body (minAvailable /
maxUnavailable) comes verbatim from .Values.podDisruptionBudget.
Fix: policy/v1beta1 was removed in Kubernetes 1.25; prefer policy/v1 when
the target cluster serves it, falling back to v1beta1 for older clusters.
*/}}
{{- if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app.kubernetes.io/instance: "{{ .Release.Name }}"
    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
{{- end }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end -}}

View File

@ -0,0 +1,42 @@
{{- if .Values.podSecurityPolicy.enabled }}
# NOTE(review): PodSecurityPolicy was removed in Kubernetes 1.25; this
# resource only renders when podSecurityPolicy.enabled is set, so it is
# opt-in for clusters that still support it.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podSecurityPolicy.annotations }}
  annotations:
{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}
{{- end }}
spec:
  privileged: false
  # Only the kubeconfig Secret volume is needed; extra volume types can be
  # allowed via podSecurityPolicy.additionalVolumes.
  volumes:
    - 'secret'
{{- if .Values.podSecurityPolicy.additionalVolumes }}
{{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }}
{{- end }}
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'MustRunAs'
    ranges:
      # Forbid adding the root group.
      - min: 1
        max: 65535
  fsGroup:
    rule: 'MustRunAs'
    ranges:
      # Forbid adding the root group.
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}}
# Grants permission to "use" the chart's PodSecurityPolicy.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: psp-{{ template "kube-state-metrics.fullname" . }}
rules:
# PSPs moved from the "extensions" API group to "policy" in Kubernetes 1.16;
# pick the group from the (overridable) target cluster version.
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }}
- apiGroups: ['policy']
{{- else }}
- apiGroups: ['extensions']
{{- end }}
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - {{ template "kube-state-metrics.fullname" . }}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}}
# Binds the PSP "use" ClusterRole to the kube-state-metrics ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: psp-{{ template "kube-state-metrics.fullname" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp-{{ template "kube-state-metrics.fullname" . }}
subjects:
- kind: ServiceAccount
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
{{- end }}

View File

@ -0,0 +1,192 @@
{{- if and (eq $.Values.rbac.create true) (not .Values.rbac.useExistingRole) -}}
{{/*
Read-only RBAC rules for every enabled collector.

When rbac.useClusterRole is false, one namespaced Role is rendered per entry
in the comma-separated .Values.namespace list; otherwise a single
ClusterRole is rendered (ranging over a one-element list runs the body once).

Fix: the range previously opened and closed immediately, so per-namespace
Roles were never generated and the namespace field resolved against the
root context. The range now wraps the whole resource (matching the pattern
already used by rolebinding.yaml) and everything inside it is $-rooted.
*/}}
{{- range (ternary (split "," (default "" $.Values.namespace)) (list "") (eq $.Values.rbac.useClusterRole false)) }}
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if eq $.Values.rbac.useClusterRole false }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
metadata:
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" $ }}
    helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }}
    app.kubernetes.io/managed-by: {{ $.Release.Service }}
    app.kubernetes.io/instance: {{ $.Release.Name }}
  name: {{ template "kube-state-metrics.fullname" $ }}
{{- if eq $.Values.rbac.useClusterRole false }}
  namespace: {{ . }}
{{- end }}
rules:
{{ if $.Values.collectors.certificatesigningrequests }}
- apiGroups: ["certificates.k8s.io"]
  resources:
  - certificatesigningrequests
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.configmaps }}
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.cronjobs }}
- apiGroups: ["batch"]
  resources:
  - cronjobs
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.daemonsets }}
- apiGroups: ["extensions", "apps"]
  resources:
  - daemonsets
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.deployments }}
- apiGroups: ["extensions", "apps"]
  resources:
  - deployments
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.endpoints }}
- apiGroups: [""]
  resources:
  - endpoints
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.horizontalpodautoscalers }}
- apiGroups: ["autoscaling"]
  resources:
  - horizontalpodautoscalers
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.ingresses }}
- apiGroups: ["extensions", "networking.k8s.io"]
  resources:
  - ingresses
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.jobs }}
- apiGroups: ["batch"]
  resources:
  - jobs
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.limitranges }}
- apiGroups: [""]
  resources:
  - limitranges
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.mutatingwebhookconfigurations }}
- apiGroups: ["admissionregistration.k8s.io"]
  resources:
  - mutatingwebhookconfigurations
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.namespaces }}
- apiGroups: [""]
  resources:
  - namespaces
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.networkpolicies }}
- apiGroups: ["networking.k8s.io"]
  resources:
  - networkpolicies
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.nodes }}
- apiGroups: [""]
  resources:
  - nodes
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.persistentvolumeclaims }}
- apiGroups: [""]
  resources:
  - persistentvolumeclaims
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.persistentvolumes }}
- apiGroups: [""]
  resources:
  - persistentvolumes
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.poddisruptionbudgets }}
- apiGroups: ["policy"]
  resources:
  - poddisruptionbudgets
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.pods }}
- apiGroups: [""]
  resources:
  - pods
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.replicasets }}
- apiGroups: ["extensions", "apps"]
  resources:
  - replicasets
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.replicationcontrollers }}
- apiGroups: [""]
  resources:
  - replicationcontrollers
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.resourcequotas }}
- apiGroups: [""]
  resources:
  - resourcequotas
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.secrets }}
- apiGroups: [""]
  resources:
  - secrets
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.services }}
- apiGroups: [""]
  resources:
  - services
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.statefulsets }}
- apiGroups: ["apps"]
  resources:
  - statefulsets
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.storageclasses }}
- apiGroups: ["storage.k8s.io"]
  resources:
  - storageclasses
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.validatingwebhookconfigurations }}
- apiGroups: ["admissionregistration.k8s.io"]
  resources:
  - validatingwebhookconfigurations
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.volumeattachments }}
- apiGroups: ["storage.k8s.io"]
  resources:
  - volumeattachments
  verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.collectors.verticalpodautoscalers }}
- apiGroups: ["autoscaling.k8s.io"]
  resources:
  - verticalpodautoscalers
  verbs: ["list", "watch"]
{{ end -}}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,27 @@
{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}}
# One RoleBinding per namespace in the comma-separated .Values.namespace
# list, binding the namespaced Role (or the role named by
# rbac.useExistingRole) to the chart's ServiceAccount. Inside the range the
# current namespace is `.` and chart context must be reached via `$`.
{{- range (split "," $.Values.namespace) }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" $ }}
    helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }}
    app.kubernetes.io/managed-by: {{ $.Release.Service }}
    app.kubernetes.io/instance: {{ $.Release.Name }}
  name: {{ template "kube-state-metrics.fullname" $ }}
  namespace: {{ . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
{{- if (not $.Values.rbac.useExistingRole) }}
  name: {{ template "kube-state-metrics.fullname" $ }}
{{- else }}
  name: {{ $.Values.rbac.useExistingRole }}
{{- end }}
subjects:
- kind: ServiceAccount
  name: {{ template "kube-state-metrics.fullname" $ }}
  namespace: {{ template "kube-state-metrics.namespace" $ }}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,42 @@
# Exposes the kube-state-metrics HTTP metrics endpoint and, optionally, the
# self-monitoring telemetry endpoint.
apiVersion: v1
kind: Service
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app.kubernetes.io/instance: "{{ .Release.Name }}"
    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
{{- end }}
  annotations:
    {{- if .Values.prometheusScrape }}
    # Advertise the endpoint for annotation-based Prometheus discovery.
    prometheus.io/scrape: '{{ .Values.prometheusScrape }}'
    {{- end }}
    {{- if .Values.service.annotations }}
    {{- toYaml .Values.service.annotations | nindent 4 }}
    {{- end }}
spec:
  type: "{{ .Values.service.type }}"
  ports:
  - name: "http"
    protocol: TCP
    port: {{ .Values.service.port }}
  {{- if .Values.service.nodePort }}
    nodePort: {{ .Values.service.nodePort }}
  {{- end }}
    targetPort: 8080
  {{ if .Values.selfMonitor.enabled }}
  # Self-metrics port. The container side is fixed at 8081 (see the
  # --telemetry-port flag in the deployment template); only the Service
  # port is tunable via selfMonitor.telemetryPort.
  - name: "metrics"
    protocol: TCP
    port: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
    targetPort: 8081
  {{ end }}
{{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: "{{ .Values.service.loadBalancerIP }}"
{{- end }}
  selector:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

View File

@ -0,0 +1,18 @@
{{- if .Values.serviceAccount.create -}}
# ServiceAccount for kube-state-metrics; name falls back to the chart
# fullname via the serviceAccountName helper when no explicit name is set.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
{{- if .Values.serviceAccount.annotations }}
  annotations:
{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
{{- end }}
imagePullSecrets:
{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }}
{{- end -}}

View File

@ -0,0 +1,34 @@
{{- if .Values.prometheus.monitor.enabled }}
# Prometheus Operator ServiceMonitor; requires the monitoring.coreos.com
# CRDs to be installed in the target cluster.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app.kubernetes.io/instance: "{{ .Release.Name }}"
    app.kubernetes.io/managed-by: "{{ .Release.Service }}"
    {{- if .Values.prometheus.monitor.additionalLabels }}
{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}
    {{- end }}
    {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
    {{- end }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  endpoints:
  # Main metrics endpoint (Service port "http").
  - port: http
    {{- if .Values.prometheus.monitor.honorLabels }}
    honorLabels: true
    {{- end }}
  {{ if .Values.selfMonitor.enabled }}
  # Self-monitoring telemetry endpoint (Service port "metrics").
  - port: metrics
    {{- if .Values.prometheus.monitor.honorLabels }}
    honorLabels: true
    {{- end }}
  {{ end }}
{{- end }}

View File

@ -0,0 +1,29 @@
{{- if and .Values.autosharding.enabled .Values.rbac.create -}}
# Rendered only for autosharding: grants each pod read access to pods and to
# its own StatefulSet — presumably so the sharding logic can determine the
# replica count and the pod's position in it (TODO confirm against the
# kube-state-metrics sharding docs).
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - apps
  # Restricted to this chart's own StatefulSet.
  resourceNames:
  - {{ template "kube-state-metrics.fullname" . }}
  resources:
  - statefulsets
  verbs:
  - get
  - list
  - watch
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if and .Values.autosharding.enabled .Values.rbac.create -}}
# Binds the autosharding stsdiscovery Role to the chart's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
  labels:
    app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }}
subjects:
- kind: ServiceAccount
  name: {{ template "kube-state-metrics.fullname" . }}
  namespace: {{ template "kube-state-metrics.namespace" . }}
{{- end }}

View File

@ -0,0 +1,179 @@
# Default values for kube-state-metrics.
prometheusScrape: true

image:
  repository: k8s.gcr.io/kube-state-metrics/kube-state-metrics
  tag: v1.9.8
  pullPolicy: IfNotPresent

imagePullSecrets: []
# - name: "image-pull-secret"

# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data
# will be automatically sharded across <.Values.replicas> pods using the built-in
# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding
# This is an experimental feature and there are no stability guarantees.
autosharding:
  enabled: false

replicas: 1

# List of additional cli arguments to configure kube-state-metrics
# for example: --enable-gzip-encoding, --log-file, etc.
# all the possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/master/docs/cli-arguments.md
extraArgs: []

service:
  port: 8080
  # Default to clusterIP for backward compatibility
  type: ClusterIP
  nodePort: 0
  loadBalancerIP: ""
  annotations: {}

customLabels: {}

hostNetwork: false

rbac:
  # If true, create & use RBAC resources
  create: true
  # Set to a rolename to use existing role - skipping role creation - but still doing serviceaccount and rolebinding to it, rolename set here.
  # useExistingRole: your-existing-role
  # If set to false - run without cluster-admin privileges - ONLY works if namespace is also set (if useExistingRole is set this name is used as ClusterRole or Role to bind to)
  useClusterRole: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created, require rbac true
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Reference to one or more secrets to be used when pulling images
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  imagePullSecrets: []
  # ServiceAccount annotations.
  # Use case: AWS EKS IAM roles for service accounts
  # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html
  annotations: {}

prometheus:
  monitor:
    enabled: false
    additionalLabels: {}
    namespace: ""
    honorLabels: false

## Specify if a Pod Security Policy for kube-state-metrics must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
  enabled: false
  annotations: {}
    ## Specify pod annotations
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
    ##
    # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
  additionalVolumes: []

securityContext:
  enabled: true
  runAsGroup: 65534
  runAsUser: 65534
  fsGroup: 65534

## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}

## Affinity settings for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}

## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []

# Annotations to be added to the pod
podAnnotations: {}

## Assign a PriorityClassName to pods if set
# priorityClassName: ""

# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}

# Available collectors for kube-state-metrics. By default all available
# collectors are enabled.
collectors:
  certificatesigningrequests: true
  configmaps: true
  cronjobs: true
  daemonsets: true
  deployments: true
  endpoints: true
  horizontalpodautoscalers: true
  ingresses: true
  jobs: true
  limitranges: true
  mutatingwebhookconfigurations: true
  namespaces: true
  networkpolicies: true
  nodes: true
  persistentvolumeclaims: true
  persistentvolumes: true
  poddisruptionbudgets: true
  pods: true
  replicasets: true
  replicationcontrollers: true
  resourcequotas: true
  secrets: true
  services: true
  statefulsets: true
  storageclasses: true
  validatingwebhookconfigurations: true
  verticalpodautoscalers: false
  volumeattachments: true

# Enabling kubeconfig will pass the --kubeconfig argument to the container
kubeconfig:
  enabled: false
  # base64 encoded kube-config file
  secret:

# Namespace to be enabled for collecting resources. By default all namespaces are collected.
# namespace: ""

## Override the deployment namespace
##
namespaceOverride: ""

resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
#  limits:
#   cpu: 100m
#   memory: 64Mi
#  requests:
#   cpu: 10m
#   memory: 32Mi

## Provide a k8s version to define apiGroups for podSecurityPolicy Cluster Role.
## For example: kubeTargetVersionOverride: 1.14.9
##
kubeTargetVersionOverride: ""

# Enable self metrics configuration for service and Service Monitor
# Default values for telemetry configuration can be overridden
selfMonitor:
  enabled: false
  # telemetryHost: 0.0.0.0
  # telemetryPort: 8081

View File

@ -0,0 +1 @@
templates/admission-webhooks/job-patch/README.md

View File

@ -0,0 +1,19 @@
# Chart manifest for the newrelic-infra-operator subchart.
apiVersion: v1
appVersion: 0.6.0
description: A Helm chart to deploy the New Relic Infrastructure Kubernetes Operator.
home: https://hub.docker.com/r/newrelic/newrelic-infra-operator
icon: https://newrelic.com/assets/newrelic/source/NewRelic-logo-square.svg
keywords:
  - infrastructure
  - newrelic
  - monitoring
maintainers:
  - name: douglascamata
  - name: paologallinaharbur
  - name: davidbrota
  - name: gsanchezgavier
  - name: roobre
name: newrelic-infra-operator
sources:
  - https://github.com/newrelic/newrelic-infra-operator
version: 0.6.0

View File

@ -0,0 +1,142 @@
# newrelic-infra-operator
## Chart Details
This chart will deploy the [New Relic Infrastructure Operator][1], which injects the New Relic Infrastructure solution
as a sidecar to specific pods.
This is typically used in environments where DaemonSets are not available, such as EKS Fargate.
## Configuration
| Parameter | Description | Default |
| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| `global.cluster` - `cluster` | The cluster name for the Kubernetes cluster. | |
| `global.licenseKey` - `licenseKey` | The [license key](https://docs.newrelic.com/docs/accounts/install-new-relic/account-setup/license-key) for your New Relic Account. This will be preferred configuration option if both `licenseKey` and `customSecret` are specified. | |
| `global.fargate` - `fargate` | Must be set to `true` when deploying in an EKS Fargate environment. Adds the default policies and customAttributes to inject on fargate | |
| `image.repository` | The container to pull. | `newrelic/newrelic-infra-operator` |
| `image.pullPolicy` | The pull policy. | `IfNotPresent` |
| `image.tag` | The version of the image to pull. | `appVersion` |
| `image.pullSecrets` | The image pull secrets. | `nil` |
| `admissionWebhooksPatchJob.image.repository` | The job container to pull. | `k8s.gcr.io/ingress-nginx/kube-webhook-certgen` |
| `admissionWebhooksPatchJob.image.pullPolicy` | The job pull policy. | `IfNotPresent` |
| `admissionWebhooksPatchJob.image.pullSecrets` | Image pull secrets. | `nil` |
| `admissionWebhooksPatchJob.image.tag` | The job version of the container to pull. | `v1.1.1` |
| `admissionWebhooksPatchJob.volumeMounts` | Additional Volume mounts for Cert Job. | `[]` |
| `admissionWebhooksPatchJob.volumes` | Additional Volumes for Cert Job. | `[]` |
| `replicas` | Number of replicas in the deployment. | `1` |
| `resources` | Resources you wish to assign to the pod. | See Resources below |
| `serviceAccount.create` | If true a service account would be created and assigned for the webhook and the job. | `true` |
| `serviceAccount.name` | The service account to assign to the webhook and the job. If `serviceAccount.create` is true then this name will be used when creating the service account; if this value is not set or it evaluates to false, then when creating the account the returned value from the template `newrelic-infra-operator.fullname` will be used as name. | |
| `certManager.enabled` | Use cert-manager to provision the MutatingWebhookConfiguration certs. | `false` |
| `podSecurityContext.enabled` | Enable custom Pod Security Context. | `false` |
| `podSecurityContext.fsGroup` | fsGroup for Pod Security Context. | `1001` |
| `podSecurityContext.runAsUser` | runAsUser UID for Pod Security Context. | `1001` |
| `podSecurityContext.runAsGroup` | runAsGroup GID for Pod Security Context. | `1001` |
| `podAnnotations` | If you wish to provide additional annotations to apply to the pod(s), specify them here. | |
| `priorityClassName` | Scheduling priority of the pod. | `nil` |
| `nodeSelector` | Node label to use for scheduling. | `{}` |
| `timeoutSeconds` | Seconds to wait for a webhook to respond. The timeout value must be between 1 and 30 seconds. | `30` |
| `tolerations` | List of node taints to tolerate (requires Kubernetes >= 1.6) | `[]` |
| `affinity` | Node affinity to use for scheduling. | `{}` |
| `config.ignoreMutationErrors` | If true it instruments the operator to ignore injection error instead of failing. | `true` |
| `config.infraAgentInjection.policies[]` | All policies are ORed, if one policy matches the sidecar is injected. Within a policy PodSelectors, NamespaceSelector and NamespaceName are ANDed, any of these, if not specified, is ignored. | `[podSelector{matchExpressions[{key:"label.eks.amazonaws.com/fargate-profile",operator:"Exists"}]}]` |
| `config.infraAgentInjection.policies[].podSelector` | Selector on Pod Labels. | |
| `config.infraAgentInjection.policies[].namespaceSelector` | Selector on Namespace labels. | |
| `config.infraAgentInjection.policies[].namespaceName` | If set only pods belonging to such namespace matches the policy. | |
| `config.infraAgentInjection.agentConfig.customAttributes[]` | CustomAttributes added to each sidecar | |
| `config.infraAgentInjection.agentConfig.customAttributes[].name` | Name of custom attribute to include. | |
| `config.infraAgentInjection.agentConfig.customAttributes[].defaultValue` | Default value for custom attribute to include. | |
| `config.infraAgentInjection.agentConfig.customAttributes[].fromLabel` | Label from which take the value of the custom attribute. | |
| `config.infraAgentInjection.agentConfig.image.pullPolicy` | The sidecar image pull policy. | `IfNotPresent` |
| `config.infraAgentInjection.agentConfig.image.repository` | The infrastructure agent repository for the sidecar container. | `newrelic/infrastructure-k8s` |
| `config.infraAgentInjection.agentConfig.image.tag` | The infrastructure agent image tag for the sidecar container. | `2.8.2-unprivileged` |
| `config.infraAgentInjection.agentConfig.podSecurityContext.runAsUser` | runAsUser UID for Pod Security Context. | |
| `config.infraAgentInjection.agentConfig.podSecurityContext.runAsGroup` | runAsGroup UID for Pod Security Context. | |
| `config.infraAgentInjection.agentConfig.configSelectors[]` | ConfigSelectors is the way to configure resource requirements and extra envVars of the injected sidecar container. When mutating it will be applied the first configuration having the labelSelector matching with the mutating pod. | |
| `config.infraAgentInjection.agentConfig.configSelectors[].resourceRequirements` | ResourceRequirements to apply to the sidecar. | |
| `config.infraAgentInjection.agentConfig.configSelectors[].extraEnvVars` | ExtraEnvVars to pass to the injected sidecar. | |
| `config.infraAgentInjection.agentConfig.configSelectors[].labelSelector` | LabelSelector matching the labels of the mutating pods. | |
## Example
Make sure you have [added the New Relic chart repository.](../../README.md#install)
Then, to install this chart, run the following command:
```sh
helm upgrade --install [release-name] newrelic/newrelic-infra-operator --set cluster=my_cluster_name --set licenseKey=[your-license-key]
```
When installing on Fargate add as well `--set fargate=true`
## Configure in which pods the sidecar should be injected
Policies are available in order to configure in which pods the sidecar should be injected.
Each policy is evaluated independently and if at least one policy matches the operator will inject the sidecar.
Policies are composed by `namespaceSelector` checking the labels of the Pod namespace, `podSelector` checking
the labels of the Pod and `namespace` checking the namespace name. Each of those, if specified, are ANDed.
By default, the policies are configured in order to inject the sidecar in each pod belonging to a Fargate profile.
>Moreover, it is possible to add the label `infra-operator.newrelic.com/disable-injection` to Pods to exclude injection
for a single Pod that otherwise would be selected by the policies.
Please make sure to configure policies correctly to avoid injecting sidecar for pods running on EC2 nodes
already monitored by the infrastructure DaemonSet.
## Configure the sidecar with labelsSelectors
It is also possible to configure `resourceRequirements` and `extraEnvVars` based on the labels of the mutating Pod.
The current configuration increases the resource requirements for the sidecar injected on `KSM` instances. Moreover,
it injects the `DISABLE_KUBE_STATE_METRICS` environment variable for Pods not running on `KSM` instances
to decrease the load on the API server.
## Resources
The default set of resources assigned to the newrelic-infra-operator pods is shown below:
```yaml
resources:
limits:
memory: 80M
requests:
cpu: 100m
memory: 30M
```
The default set of resources assigned to the injected sidecar when the pod is **not** KSM is shown below:
```yaml
resources:
limits:
memory: 100M
cpu: 200m
requests:
memory: 50M
cpu: 100m
```
The default set of resources assigned to the injected sidecar when the pod is KSM is shown below:
```yaml
resources:
limits:
memory: 300M
cpu: 300m
requests:
memory: 150M
cpu: 150m
```
## Tolerations
No default set of tolerations are defined.
Please note that these tolerations are applied only to the operator and the certificate-related jobs themselves, and not to any pod or container injected by it.
[1]: https://github.com/newrelic/newrelic-infra-operator
[2]: https://cert-manager.io/

View File

@ -0,0 +1,39 @@
cluster: test-cluster
licenseKey: pleasePassCIThanks
serviceAccount:
  name: newrelic-infra-operator-test
image:
  repository: e2e/newrelic-infra-operator
  tag: test  # Defaults to AppVersion
  pullPolicy: IfNotPresent
  pullSecrets:
    - name: test-pull-secret
admissionWebhooksPatchJob:
  volumeMounts:
    - name: tmp
      mountPath: /tmp
  volumes:
    - name: tmp
      # A bare "emptyDir:" parses as YAML null; Kubernetes expects an explicit
      # empty mapping to select the emptyDir volume source.
      emptyDir: {}
podAnnotations:
  test-annotation: test-value
affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        podAffinityTerm:
          topologyKey: topology.kubernetes.io/zone
          labelSelector:
            matchExpressions:
              - key: test-key
                operator: In
                values:
                  - test-value
tolerations:
  - key: "key1"
    operator: "Exists"
    effect: "NoSchedule"
nodeSelector:
  beta.kubernetes.io/os: linux
fargate: true

View File

@ -0,0 +1,4 @@
Your deployment of the New Relic Infrastructure Operator is complete.
You can check on the progress of this by running the following command:
kubectl get deployments -o wide -w --namespace {{ .Release.Namespace }} {{ template "newrelic-infra-operator.fullname" . }}

View File

@ -0,0 +1,148 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "newrelic-infra-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "newrelic-infra-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "newrelic-infra-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common app label
*/}}
{{- define "newrelic-infra-operator.appLabel" -}}
app.kubernetes.io/name: {{ include "newrelic-infra-operator.name" . }}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "newrelic-infra-operator.labels" -}}
{{ include "newrelic-infra-operator.appLabel" . }}
helm.sh/chart: {{ include "newrelic-infra-operator.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "newrelic-infra-operator.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "newrelic-infra-operator.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the licenseKey
*/}}
{{- define "newrelic-infra-operator.licenseKey" -}}
{{- if .Values.global}}
{{- if .Values.global.licenseKey }}
{{- .Values.global.licenseKey -}}
{{- else -}}
{{- .Values.licenseKey | default "" -}}
{{- end -}}
{{- else -}}
{{- .Values.licenseKey | default "" -}}
{{- end -}}
{{- end -}}
{{/*
Return the cluster
*/}}
{{- define "newrelic-infra-operator.cluster" -}}
{{- if .Values.global -}}
{{- if .Values.global.cluster -}}
{{- .Values.global.cluster -}}
{{- else -}}
{{- .Values.cluster | required "cluster name must be set" -}}
{{- end -}}
{{- else -}}
{{- .Values.cluster | required "cluster name must be set" -}}
{{- end -}}
{{- end -}}
{{/*
Renders a value that contains template.
Usage:
{{ include "tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "tplvalues.render" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}
{{/*
Return the customSecretName
*/}}
{{- define "newrelic-infra-operator.customSecretName" -}}
{{- if .Values.global }}
{{- if .Values.global.customSecretName }}
{{- .Values.global.customSecretName -}}
{{- else -}}
{{- .Values.customSecretName | default "" -}}
{{- end -}}
{{- else -}}
{{- .Values.customSecretName | default "" -}}
{{- end -}}
{{- end -}}
{{/*
Return the customSecretLicenseKey
*/}}
{{- define "newrelic-infra-operator.customSecretLicenseKey" -}}
{{- if .Values.global }}
{{- if .Values.global.customSecretLicenseKey }}
{{- .Values.global.customSecretLicenseKey -}}
{{- else -}}
{{- .Values.customSecretLicenseKey | default "" -}}
{{- end -}}
{{- else -}}
{{- .Values.customSecretLicenseKey | default "" -}}
{{- end -}}
{{- end -}}
{{/*
Returns fargate
*/}}
{{- define "newrelic.fargate" -}}
{{- if .Values.global }}
{{- if .Values.global.fargate }}
{{- .Values.global.fargate -}}
{{- end -}}
{{- else if .Values.fargate }}
{{- .Values.fargate -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,25 @@
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
app: {{ template "newrelic-infra-operator.name" $ }}-admission
{{ include "newrelic-infra-operator.labels" $ | indent 4 }}
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- get
- update
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "newrelic-infra-operator.fullname" . }}-admission
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
app: {{ template "newrelic-infra-operator.name" $ }}-admission
{{ include "newrelic-infra-operator.labels" $ | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
subjects:
- kind: ServiceAccount
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,54 @@
{{- /*
Pre-install/pre-upgrade hook Job: runs kube-webhook-certgen "create" to
generate the TLS key/cert pair for the mutating webhook and store it in the
"-admission" Secret. Only rendered when neither a custom TLS certificate nor
cert-manager is used.
*/ -}}
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: batch/v1
kind: Job
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ template "newrelic-infra-operator.fullname" . }}-admission-create
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    app: {{ template "newrelic-infra-operator.name" $ }}-admission-create
{{ include "newrelic-infra-operator.labels" $ | indent 4 }}
spec:
  template:
    metadata:
      name: {{ template "newrelic-infra-operator.fullname" . }}-admission-create
      labels:
        app: {{ template "newrelic-infra-operator.name" $ }}-admission-create
{{ include "newrelic-infra-operator.labels" $ | indent 8 }}
    spec:
      {{- if .Values.admissionWebhooksPatchJob.image.pullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.admissionWebhooksPatchJob.image.pullSecrets | indent 8 }}
      {{- end }}
      containers:
        - name: create
          image: {{ .Values.admissionWebhooksPatchJob.image.repository }}:{{ .Values.admissionWebhooksPatchJob.image.tag }}
          imagePullPolicy: {{ .Values.admissionWebhooksPatchJob.image.pullPolicy }}
          args:
            - create
            - --host={{ template "newrelic-infra-operator.fullname" . }},{{ template "newrelic-infra-operator.fullname" . }}.{{ .Release.Namespace }}.svc
            - --namespace={{ .Release.Namespace }}
            - --secret-name={{ template "newrelic-infra-operator.fullname" . }}-admission
            - --cert-name=tls.crt
            - --key-name=tls.key
          {{- /* Fix: these values are documented and supplied at
                 .Values.admissionWebhooksPatchJob.volumeMounts / .volumes
                 (see chart README and CI values), not under .image — the
                 previous path meant user-provided volumes never rendered. */}}
          {{- if .Values.admissionWebhooksPatchJob.volumeMounts }}
          volumeMounts:
          {{- include "tplvalues.render" ( dict "value" .Values.admissionWebhooksPatchJob.volumeMounts "context" $ ) | nindent 10 }}
          {{- end }}
      {{- if .Values.admissionWebhooksPatchJob.volumes }}
      volumes:
      {{- include "tplvalues.render" ( dict "value" .Values.admissionWebhooksPatchJob.volumes "context" $ ) | nindent 8 }}
      {{- end }}
      restartPolicy: OnFailure
      serviceAccountName: {{ template "newrelic-infra-operator.fullname" . }}-admission
      securityContext:
        runAsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      {{- if .Values.tolerations }}
      tolerations:
        {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
{{- end }}

View File

@ -0,0 +1,54 @@
{{- /*
Post-install/post-upgrade hook Job: runs kube-webhook-certgen "patch" to
inject the generated CA bundle into the MutatingWebhookConfiguration. Only
rendered when neither a custom TLS certificate nor cert-manager is used.
*/ -}}
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: batch/v1
kind: Job
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ template "newrelic-infra-operator.fullname" . }}-admission-patch
  annotations:
    "helm.sh/hook": post-install,post-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
  labels:
    app: {{ template "newrelic-infra-operator.name" $ }}-admission-patch
{{ include "newrelic-infra-operator.labels" $ | indent 4 }}
spec:
  template:
    metadata:
      name: {{ template "newrelic-infra-operator.fullname" . }}-admission-patch
      labels:
        app: {{ template "newrelic-infra-operator.name" $ }}-admission-patch
{{ include "newrelic-infra-operator.labels" $ | indent 8 }}
    spec:
      {{- if .Values.admissionWebhooksPatchJob.image.pullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.admissionWebhooksPatchJob.image.pullSecrets | indent 8 }}
      {{- end }}
      containers:
        - name: patch
          image: {{ .Values.admissionWebhooksPatchJob.image.repository }}:{{ .Values.admissionWebhooksPatchJob.image.tag }}
          imagePullPolicy: {{ .Values.admissionWebhooksPatchJob.image.pullPolicy }}
          args:
            - patch
            - --webhook-name={{ template "newrelic-infra-operator.fullname" . }}
            - --namespace={{ .Release.Namespace }}
            - --secret-name={{ template "newrelic-infra-operator.fullname" . }}-admission
            - --patch-failure-policy=Ignore
            - --patch-validating=false
          {{- /* Fix: these values are documented and supplied at
                 .Values.admissionWebhooksPatchJob.volumeMounts / .volumes
                 (see chart README and CI values), not under .image — the
                 previous path meant user-provided volumes never rendered. */}}
          {{- if .Values.admissionWebhooksPatchJob.volumeMounts }}
          volumeMounts:
          {{- include "tplvalues.render" ( dict "value" .Values.admissionWebhooksPatchJob.volumeMounts "context" $ ) | nindent 10 }}
          {{- end }}
      {{- if .Values.admissionWebhooksPatchJob.volumes }}
      volumes:
      {{- include "tplvalues.render" ( dict "value" .Values.admissionWebhooksPatchJob.volumes "context" $ ) | nindent 8 }}
      {{- end }}
      restartPolicy: OnFailure
      serviceAccountName: {{ template "newrelic-infra-operator.fullname" . }}-admission
      securityContext:
        runAsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      {{- if .Values.tolerations }}
      tolerations:
        {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
{{- end }}

View File

@ -0,0 +1,50 @@
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
app: {{ template "newrelic-infra-operator.name" . }}-admission
{{ include "newrelic-infra-operator.labels" . | indent 4 }}
spec:
privileged: false
# Required to prevent escalations to root.
# allowPrivilegeEscalation: false
# This is redundant with non-root + disallow privilege escalation,
# but we can provide it for defense in depth.
# requiredDropCapabilities:
# - ALL
# Allow core volume types.
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
# Permits the container to run with root privileges as well.
rule: 'RunAsAny'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 0
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 0
max: 65535
readOnlyRootFilesystem: false
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
app: {{ template "newrelic-infra-operator.name" $ }}-admission
{{ include "newrelic-infra-operator.labels" $ | indent 4 }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
app: {{ template "newrelic-infra-operator.name" $ }}-admission
{{ include "newrelic-infra-operator.labels" $ | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
subjects:
- kind: ServiceAccount
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if (and (not .Values.customTLSCertificate) (not .Values.certManager.enabled)) }}
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
app: {{ template "newrelic-infra-operator.name" $ }}-admission
{{ include "newrelic-infra-operator.labels" $ | indent 4 }}
{{- end }}

View File

@ -0,0 +1,32 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}
{{- if .Values.certManager.enabled }}
annotations:
certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "newrelic-infra-operator.fullname" .) | quote }}
cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "newrelic-infra-operator.fullname" .) | quote }}
{{- end }}
labels:
{{- include "newrelic-infra-operator.labels" . | nindent 4 }}
webhooks:
- name: newrelic-infra-operator.newrelic.com
clientConfig:
service:
name: {{ template "newrelic-infra-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
path: "/mutate-v1-pod"
{{- if not .Values.certManager.enabled }}
caBundle: ""
{{- end }}
rules:
- operations: ["CREATE"]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]
failurePolicy: Ignore
timeoutSeconds: {{ .Values.timeoutSeconds }}
sideEffects: NoneOnDryRun
admissionReviewVersions:
- v1
reinvocationPolicy: IfNeeded

View File

@ -0,0 +1,52 @@
{{ if .Values.certManager.enabled }}
---
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-self-signed-issuer
spec:
selfSigned: {}
---
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-root-cert
spec:
secretName: {{ template "newrelic-infra-operator.fullname" . }}-root-cert
duration: 43800h # 5y
issuerRef:
name: {{ template "newrelic-infra-operator.fullname" . }}-self-signed-issuer
commonName: "ca.webhook.nri"
isCA: true
---
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-root-issuer
spec:
ca:
secretName: {{ template "newrelic-infra-operator.fullname" . }}-root-cert
---
# Finally, generate a serving certificate for the webhook to use
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-webhook-cert
spec:
secretName: {{ template "newrelic-infra-operator.fullname" . }}-admission
duration: 8760h # 1y
issuerRef:
name: {{ template "newrelic-infra-operator.fullname" . }}-root-issuer
dnsNames:
- {{ template "newrelic-infra-operator.fullname" . }}
- {{ template "newrelic-infra-operator.fullname" . }}.{{ .Release.Namespace }}
- {{ template "newrelic-infra-operator.fullname" . }}.{{ .Release.Namespace }}.svc
{{ end }}

View File

@ -0,0 +1,52 @@
{{- define "newrelic-infra-operator.infra-agent-monitoring-rules" -}}
- apiGroups: [""]
resources:
- "nodes"
- "nodes/metrics"
- "nodes/stats"
- "nodes/proxy"
- "pods"
- "services"
verbs: ["get", "list"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- end -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}
labels:
{{- include "newrelic-infra-operator.labels" . | nindent 4 }}
rules:
{{/* Allow creating and updating secrets with license key for infra agent. */ -}}
- apiGroups: [""]
resources:
- "secrets"
verbs: ["get", "update", "patch"]
resourceNames: [{{ template "newrelic-infra-operator.fullname" . }}-config]
{{/* resourceNames used above do not support "create" verb. */ -}}
- apiGroups: [""]
resources:
- "secrets"
verbs: ["create"]
{{/* "list" and "watch" are required for controller-runtime caching. */ -}}
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings"]
verbs: ["list", "watch", "get"]
{{/* Our controller needs permission to add the ServiceAccounts from the user to the -infra-agent CRB. */ -}}
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings"]
verbs: ["update"]
resourceNames: [{{ template "newrelic-infra-operator.fullname" . }}-infra-agent]
{{- /* Controller must have permissions it will grant to other ServiceAccounts. */ -}}
{{- include "newrelic-infra-operator.infra-agent-monitoring-rules" . | nindent 2 }}
---
{{/* infra-agent is the ClusterRole to be used by the injected agents to get metrics */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}-infra-agent
labels: {{- include "newrelic-infra-operator.labels" . | nindent 4 }}
rules:
{{- include "newrelic-infra-operator.infra-agent-monitoring-rules" . | nindent 2 }}

View File

@ -0,0 +1,26 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}
labels:
{{- include "newrelic-infra-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "newrelic-infra-operator.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "newrelic-infra-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
{{/* infra-agent is the ClusterRoleBinding to be used by the ServiceAccounts of the injected agents */}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "newrelic-infra-operator.fullname" . }}-infra-agent
labels:
{{- include "newrelic-infra-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "newrelic-infra-operator.fullname" . }}-infra-agent

View File

@ -0,0 +1,35 @@
{{- define "fargate-config" -}}
infraAgentInjection:
resourcePrefix: {{ template "newrelic-infra-operator.fullname" . }}
{{- if include "newrelic.fargate" . }}
{{- if not .Values.config.infraAgentInjection.policies }}
policies:
- podSelector:
matchExpressions:
- key: "eks.amazonaws.com/fargate-profile"
operator: Exists
{{- end }}
agentConfig:
{{- if not .Values.config.infraAgentInjection.agentConfig.customAttributes }}
customAttributes:
- name: computeType
defaultValue: serverless
- name: fargateProfile
fromLabel: eks.amazonaws.com/fargate-profile
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "config" -}}
{{ toYaml (merge (include "fargate-config" . | fromYaml) .Values.config) }}
{{- end }}
apiVersion: v1
kind: ConfigMap
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "newrelic-infra-operator.fullname" . }}-config
labels:
{{- include "newrelic-infra-operator.labels" . | nindent 4 }}
data:
operator.yaml: {{- include "config" . | toYaml | nindent 4 }}

View File

@ -0,0 +1,90 @@
{{- /* Deployment of the newrelic-infra-operator: serves the admission webhook
       (port 9443, see service.yaml) that injects the infra-agent sidecar. */ -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ template "newrelic-infra-operator.fullname" . }}
  labels:
    {{- include "newrelic-infra-operator.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicas }}
  selector:
    matchLabels:
      {{- include "newrelic-infra-operator.appLabel" . | nindent 6 }}
  template:
    metadata:
      annotations:
        {{- /* Checksums of the rendered config and license secret force a pod
               rollout whenever either of those templates changes. */}}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
      {{- if .Values.podAnnotations }}
        {{- toYaml .Values.podAnnotations | nindent 8 }}
      {{- end }}
      labels:
        {{- include "newrelic-infra-operator.labels" . | nindent 8 }}
    spec:
      serviceAccountName: {{ template "newrelic-infra-operator.serviceAccountName" . }}
      {{- if .Values.podSecurityContext.enabled }}
      securityContext:
        runAsUser: {{ .Values.podSecurityContext.runAsUser }}
        runAsGroup: {{ .Values.podSecurityContext.runAsGroup }}
        fsGroup: {{ .Values.podSecurityContext.fsGroup }}
      {{- end }}
      {{- if .Values.image.pullSecrets }}
      imagePullSecrets:
        {{- toYaml .Values.image.pullSecrets | nindent 8 }}
      {{- end }}
      containers:
      - name: {{ template "newrelic-infra-operator.name" . }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
        env:
        - name: CLUSTER_NAME
          value: {{ include "newrelic-infra-operator.cluster" . }}
        {{- /* License key comes either from the chart-managed secret (when
               licenseKey is set in the values) or from a user-supplied custom
               secret; rendering fails if neither is configured. */}}
        - name: NRIA_LICENSE_KEY
          valueFrom:
            secretKeyRef:
            {{- if (include "newrelic-infra-operator.licenseKey" .) }}
              name: {{ template "newrelic-infra-operator.fullname" . }}-license
              key: license
            {{- else if include "newrelic-infra-operator.customSecretName" . }}
              name: {{ include "newrelic-infra-operator.customSecretName" . }}
              key: {{ include "newrelic-infra-operator.customSecretLicenseKey" . }}
            {{- else }}
              {{- "" | required "Cannot find License Key, either licenseKey or customSecretName must be defined" }}
            {{- end }}
        volumeMounts:
        - name: config
          mountPath: /etc/newrelic/newrelic-infra-operator/
        {{- /* Webhook TLS material; the secret is produced by the cert-patch
               job or cert-manager (see admissionWebhooksPatchJob/certManager
               values). */}}
        - name: tls-key-cert-pair
          mountPath: /tmp/k8s-webhook-server/serving-certs/
        readinessProbe:
          httpGet:
            path: /healthz
            port: 9440
          initialDelaySeconds: 1
          periodSeconds: 1
        {{- if .Values.resources }}
        resources:
          {{- toYaml .Values.resources | nindent 10 }}
        {{- end }}
      volumes:
      - name: config
        configMap:
          name: {{ template "newrelic-infra-operator.fullname" . }}-config
      - name: tls-key-cert-pair
        secret:
          secretName: {{ template "newrelic-infra-operator.fullname" . }}-admission
      {{- if $.Values.priorityClassName }}
      priorityClassName: {{ $.Values.priorityClassName }}
      {{- end }}
      {{- if $.Values.nodeSelector }}
      nodeSelector:
        {{- toYaml $.Values.nodeSelector | nindent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations:
        {{- toYaml .Values.tolerations | nindent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
        {{- toYaml .Values.affinity | nindent 8 }}
      {{- end }}

View File

@ -0,0 +1,13 @@
{{- /* Secret holding the New Relic license key. Rendered only when the key is
       set directly in the values; otherwise the deployment reads it from the
       user-supplied custom secret instead. */ -}}
{{- $licenseKey := include "newrelic-infra-operator.licenseKey" . -}}
{{- if $licenseKey }}
apiVersion: v1
kind: Secret
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ template "newrelic-infra-operator.fullname" . }}-license
  labels:
{{- /* NOTE(review): uses `indent 4` while the sibling templates use
       `{{- ... | nindent 4 }}`; the rendered output is equivalent, but
       consider normalizing for consistency. */}}
{{ include "newrelic-infra-operator.labels" . | indent 4 }}
type: Opaque
data:
  license: {{ $licenseKey | b64enc }}
{{- end }}

View File

@ -0,0 +1,13 @@
{{- /* Service in front of the operator's admission webhook: the API server
       connects on 443 and traffic is forwarded to 9443 inside the pod (the
       webhook server's serving port, see deployment.yaml). */ -}}
apiVersion: v1
kind: Service
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ template "newrelic-infra-operator.fullname" . }}
  labels:
    {{- include "newrelic-infra-operator.labels" . | nindent 4 }}
spec:
  ports:
    - port: 443
      targetPort: 9443
  selector:
    {{- include "newrelic-infra-operator.appLabel" . | nindent 4 }}

View File

@ -0,0 +1,7 @@
{{- /* ServiceAccount shared by the operator deployment and the webhook
       cert-patch job (created when serviceAccount.create is true; the name
       falls back to the fullname template when serviceAccount.name is empty). */ -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ template "newrelic-infra-operator.serviceAccountName" . }}
  labels:
    {{- include "newrelic-infra-operator.labels" . | nindent 4 }}

View File

@ -0,0 +1,190 @@
# IMPORTANT: The Kubernetes cluster name
# https://docs.newrelic.com/docs/kubernetes-monitoring-integration
#
# licenseKey:
# cluster:
# fargate:
# IMPORTANT: the previous values can also be set as global so that they
# can be shared by other newrelic product's charts.
#
# global:
# licenseKey:
# cluster:
# fargate:
image:
repository: newrelic/newrelic-infra-operator
tag: "" # Defaults to AppVersion
pullPolicy: IfNotPresent
# It is possible to specify docker registry credentials.
# See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# pullSecrets:
# - name: regsecret
admissionWebhooksPatchJob:
image:
repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen
tag: v1.1.1
pullPolicy: IfNotPresent
# pullSecrets:
# - name: regsecret
# Volume mounts to add to the job, you might want to mount tmp if Pod Security Policies.
# Enforce a read-only root.
volumeMounts: []
# - name: tmp
# mountPath: /tmp
#
# Volumes to add to the job container.
volumes: []
# - name: tmp
# emptyDir: {}
replicas: 1
resources:
limits:
memory: 80M
requests:
cpu: 100m
memory: 30M
serviceAccount:
# Specifies whether a ServiceAccount should be created for the job and the deployment.
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template.
name:
# Configure podSecurityContext
podSecurityContext:
enabled: false
fsGroup: 1001
runAsUser: 1001
runAsGroup: 1001
# Use cert manager for webhook certs, rather than the built-in patch job.
certManager:
enabled: false
# If you wish to provide additional annotations to apply to the pod(s), specify them here.
# podAnnotations:
# Pod scheduling priority
# Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: high-priority
# Webhook timeout.
# Configure how long the API server should wait for a webhook to respond before treating the call as a failure.
# Ref: https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#timeouts
timeoutSeconds: 30
# Operator configuration
# The following are the default values for the operator
config:
## IgnoreMutationErrors instruments the operator to ignore injection error instead of failing.
## If set to false errors of the injection could block the creation of pods.
ignoreMutationErrors: true
## configuration of the sidecar injection webhook
infraAgentInjection:
## All policies are ORed, if one policy matches the sidecar is injected.
## Within a policy PodSelectors, NamespaceSelector and NamespaceName are ANDed, any of these, if not specified, is ignored.
## The following policy is injected if global.fargate=true and matches all pods belonging to any fargate profile.
# policies:
# - podSelector:
# matchExpressions:
# - key: "eks.amazonaws.com/fargate-profile"
# operator: Exists
## Also NamespaceName and NamespaceSelector can be leveraged.
# namespaceName: "my-namespace"
# namespaceSelector: {}
## agentConfig contains the configuration for the container agent injected
agentConfig:
## CustomAttributes allows to pass any custom attribute to the injected infra agents.
## The value is computed either from the defaultValue or taken at injected time from Label specified in "fromLabel".
## Either the label should exist or the default should be specified in order to have the injection working.
# customAttributes:
# - name: computeType
# defaultValue: serverless
# - name: fargateProfile
# fromLabel: eks.amazonaws.com/fargate-profile
## Image of the infrastructure agent to be injected.
image:
repository: newrelic/infrastructure-k8s
tag: 2.8.2-unprivileged
pullPolicy: IfNotPresent
## configSelectors is the way to configure resource requirements and extra envVars of the injected sidecar container.
## When mutating it will be applied the first configuration having the labelSelector matching with the mutating pod.
configSelectors:
## resourceRequirements to apply to the injected sidecar.
- resourceRequirements:
limits:
memory: 100M
cpu: 200m
requests:
memory: 50M
cpu: 100m
## extraEnvVars to pass to the injected sidecar.
extraEnvVars:
DISABLE_KUBE_STATE_METRICS: "true"
# NRIA_VERBOSE: "1"
labelSelector:
matchExpressions:
- key: "app.kubernetes.io/name"
operator: NotIn
values: ["kube-state-metrics"]
- key: "app"
operator: NotIn
values: ["kube-state-metrics"]
- key: "k8s-app"
operator: NotIn
values: ["kube-state-metrics"]
- resourceRequirements:
limits:
memory: 300M
cpu: 300m
requests:
memory: 150M
cpu: 150m
labelSelector:
matchLabels:
k8s-app: kube-state-metrics
# extraEnvVars:
# NRIA_VERBOSE: "1"
- resourceRequirements:
limits:
memory: 300M
cpu: 300m
requests:
memory: 150M
cpu: 150m
labelSelector:
matchLabels:
app: kube-state-metrics
# extraEnvVars:
# NRIA_VERBOSE: "1"
- resourceRequirements:
limits:
memory: 300M
cpu: 300m
requests:
memory: 150M
cpu: 150m
labelSelector:
matchLabels:
app.kubernetes.io/name: kube-state-metrics
# extraEnvVars:
# NRIA_VERBOSE: "1"
## pod Security Context of the sidecar injected.
## Notice that ReadOnlyRootFilesystem and AllowPrivilegeEscalation enforced respectively to true and to false.
# podSecurityContext:
# RunAsUser:
# RunAsGroup:
fullnameOverride: ""
affinity: {}
tolerations: []

View File

@ -0,0 +1,6 @@
dependencies:
- name: common-library
repository: https://helm-charts.newrelic.com
version: 1.0.2
digest: sha256:22d56c5e643d46e9a1675354595ca6832a0f4db8422d89cb7db73a3b0d0d7873
generated: "2022-04-22T16:59:10.493566+02:00"

View File

@ -0,0 +1,36 @@
apiVersion: v2
appVersion: 3.1.1
dependencies:
- name: common-library
repository: https://helm-charts.newrelic.com
version: 1.0.2
description: A Helm chart to deploy the New Relic Kubernetes monitoring solution
home: https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/introduction-kubernetes-integration/
icon: https://newrelic.com/themes/custom/curio/assets/mediakit/NR_logo_Horizontal.svg
keywords:
- infrastructure
- newrelic
- monitoring
maintainers:
- name: alvarocabanas
url: https://github.com/alvarocabanas
- name: carlossscastro
url: https://github.com/carlossscastro
- name: sigilioso
url: https://github.com/sigilioso
- name: gsanchezgavier
url: https://github.com/gsanchezgavier
- name: kang-makes
url: https://github.com/kang-makes
- name: marcsanmi
url: https://github.com/marcsanmi
- name: paologallinaharbur
url: https://github.com/paologallinaharbur
- name: roobre
url: https://github.com/roobre
name: newrelic-infrastructure
sources:
- https://github.com/newrelic/nri-kubernetes/
- https://github.com/newrelic/nri-kubernetes/tree/master/charts/newrelic-infrastructure
- https://github.com/newrelic/infrastructure-agent/
version: 3.3.3

View File

@ -0,0 +1,209 @@
# newrelic-infrastructure
![Version: 3.3.3](https://img.shields.io/badge/Version-3.3.3-informational?style=flat-square) ![AppVersion: 3.1.1](https://img.shields.io/badge/AppVersion-3.1.1-informational?style=flat-square)
A Helm chart to deploy the New Relic Kubernetes monitoring solution
**Homepage:** <https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/introduction-kubernetes-integration/>
# Helm installation
You can install this chart using [`nri-bundle`](https://github.com/newrelic/helm-charts/tree/master/charts/nri-bundle) located in the
[helm-charts repository](https://github.com/newrelic/helm-charts) or directly from this repository by adding this Helm repository:
```shell
helm repo add nri-kubernetes https://newrelic.github.io/nri-kubernetes
helm upgrade --install newrelic-infrastructure nri-kubernetes/newrelic-infrastructure -f your-custom-values.yaml
```
## Source Code
* <https://github.com/newrelic/nri-kubernetes/>
* <https://github.com/newrelic/nri-kubernetes/tree/master/charts/newrelic-infrastructure>
* <https://github.com/newrelic/infrastructure-agent/>
## Values managed globally
This chart implements the [New Relic's common Helm library](https://github.com/newrelic/helm-charts/tree/master/library/common-library) which
means that it honors a wide range of defaults and globals common to most New Relic Helm charts.
Options that can be defined globally include `affinity`, `nodeSelector`, `tolerations`, `proxy` and others. The full list can be found at
[user's guide of the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md).
## Chart particularities
### Low data mode
There are two mechanisms to reduce the amount of data that this integration sends to New Relic. See this snippet from the `values.yaml` file:
```yaml
common:
config:
interval: 15s
lowDataMode: false
```
The `lowDataMode` toggle is the simplest way to reduce the amount of data sent to New Relic. Setting it to `true` changes the default scrape interval from 15 seconds
(the default) to 30 seconds.
If you need for some reason to fine-tune the number of seconds you can use `common.config.interval` directly. If you take a look at the `values.yaml`
file, the value there is `nil`. If any value is set there, the `lowDataMode` toggle is ignored as this value takes precedence.
Setting this interval above 40 seconds can make you experience issues with the Kubernetes Cluster Explorer so this chart limits setting the interval
inside the range of 10 to 40 seconds.
### Affinities and tolerations
The New Relic common library allows to set affinities, tolerations, and node selectors globally using e.g. `.global.affinity` to ease the configuration
when you use this chart using `nri-bundle`. This chart has an extra level of granularity to the components that it deploys:
control plane, ksm, and kubelet.
Take this snippet as an example:
```yaml
global:
affinity: {}
affinity: {}
kubelet:
affinity: {}
ksm:
affinity: {}
controlPlane:
affinity: {}
```
The order to set an affinity is to set first any `kubelet.affinity`, `ksm.affinity`, or `controlPlane.affinity`. If these values are empty the chart
fallbacks to `affinity` (at root level), and if that value is empty, the chart fallbacks to `global.affinity`.
The same procedure applies to `nodeSelector` and `tolerations`.
On the other hand, some components have affinities and tolerations predefined e.g. to be able to run kubelet pods on nodes that are tainted as master
nodes or to schedule the KSM scraper on the same node of KSM to reduce the inter-node traffic.
If you are having problems assigning pods to nodes it may be because of this. Take a look at the [`values.yaml`](values.yaml) to see if the pod that is
not having your expected behavior has any predefined value.
### `hostNetwork` toggle
In versions below v3, changing the `privileged` mode affected the `hostNetwork`. We changed this behavior and now you can set pods to use `hostNetwork`
using the corresponding [flags from the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md)
(`.global.hostNetwork` and `.hostNetwork`) but the component that scrapes data from the control plane has always set `hostNetwork` enabled by default
(Look in the [`values.yaml`](values.yaml) for `controlPlane.hostNetwork: true`)
This is because the most common configuration of the control plane components is to be configured to listen only to `localhost`.
If your cluster security policy does not allow the use of `hostNetwork`, you can disable control plane monitoring by setting `controlPlane.enabled` to
`false`.
### `privileged` toggle
The default value for `privileged` [from the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md) is
`false`, but in this particular chart it is set to `true` (Look in the [`values.yaml`](values.yaml) for `privileged: true`)
This is because `kubelet` pods need to run in privileged mode to fetch cpu, memory, process, and network metrics of your nodes.
If your cluster security policy does not allow to have `privileged` in your pod' security context, you can disable it by setting `privileged` to
`false` taking into account that you will lose all the metrics from the host and some metadata from the host that are added to the metrics of the
integrations that you have configured.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Sets pod/node affinities set almost globally. (See [Affinities and tolerations](README.md#affinities-and-tolerations)) |
| cluster | string | `""` | Name of the Kubernetes cluster monitored. Can be configured also with `global.cluster` |
| common | object | See `values.yaml` | Config that applies to all instances of the solution: kubelet, ksm, control plane and sidecars. |
| common.agentConfig | object | `{}` | Config for the Infrastructure agent. Will be used by the forwarder sidecars and the agent running integrations. See: https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/configuration/infrastructure-agent-configuration-settings/ |
| common.config.interval | duration | `15s` (See [Low data mode](README.md#low-data-mode)) | Intervals larger than 40s are not supported and will cause the NR UI to not behave properly. Any non-nil value will override the `lowDataMode` default. |
| containerSecurityContext | object | `{}` | Sets security context (at container level). Can be configured also with `global.containerSecurityContext` |
| controlPlane | object | See `values.yaml` | Configuration for the control plane scraper. |
| controlPlane.affinity | object | Deployed only in master nodes. | Affinity for the control plane DaemonSet. |
| controlPlane.config.apiServer | object | Common settings for most K8s distributions. | API Server monitoring configuration |
| controlPlane.config.apiServer.enabled | bool | `true` | Enable API Server monitoring |
| controlPlane.config.controllerManager | object | Common settings for most K8s distributions. | Controller manager monitoring configuration |
| controlPlane.config.controllerManager.enabled | bool | `true` | Enable controller manager monitoring. |
| controlPlane.config.etcd | object | Common settings for most K8s distributions. | ETCD monitoring configuration |
| controlPlane.config.etcd.enabled | bool | `true` | Enable etcd monitoring. Might require manual configuration in some environments. |
| controlPlane.config.retries | int | `3` | Number of retries after timeout expired |
| controlPlane.config.scheduler | object | Common settings for most K8s distributions. | Scheduler monitoring configuration |
| controlPlane.config.scheduler.enabled | bool | `true` | Enable scheduler monitoring. |
| controlPlane.config.timeout | string | `"10s"` | Timeout for the Kubernetes APIs contacted by the integration |
| controlPlane.enabled | bool | `true` | Deploy control plane monitoring component. |
| controlPlane.hostNetwork | bool | `true` | Run Control Plane scraper with `hostNetwork`. `hostNetwork` is required for most control plane configurations, as they only accept connections from localhost. |
| controlPlane.kind | string | `"DaemonSet"` | How to deploy the control plane scraper. If autodiscovery is in use, it should be `DaemonSet`. Advanced users using static endpoints set this to `Deployment` to avoid reporting metrics twice. |
| customAttributes | object | `{}` | Adds extra attributes to the cluster and all the metrics emitted to the backend. Can be configured also with `global.customAttributes` |
| customSecretLicenseKey | string | `""` | In case you don't want to have the license key in you values, this allows you to point to which secret key is the license key located. Can be configured also with `global.customSecretLicenseKey` |
| customSecretName | string | `""` | In case you don't want to have the license key in you values, this allows you to point to a user created secret to get the key from there. Can be configured also with `global.customSecretName` |
| dnsConfig | object | `{}` | Sets pod's dnsConfig. Can be configured also with `global.dnsConfig` |
| fedramp.enabled | bool | `false` | Enables FedRAMP. Can be configured also with `global.fedramp.enabled` |
| fullnameOverride | string | `""` | Override the full name of the release |
| hostNetwork | bool | `false` | Sets pod's hostNetwork. Can be configured also with `global.hostNetwork` |
| images | object | See `values.yaml` | Images used by the chart for the integration and agents. |
| images.agent | object | See `values.yaml` | Image for the New Relic Infrastructure Agent plus integrations. |
| images.forwarder | object | See `values.yaml` | Image for the New Relic Infrastructure Agent sidecar. |
| images.integration | object | See `values.yaml` | Image for the New Relic Kubernetes integration. |
| images.pullSecrets | list | `[]` | The secrets that are needed to pull images from a custom registry. |
| integrations | object | `{}` | Config files for other New Relic integrations that should run in this cluster. |
| ksm | object | See `values.yaml` | Configuration for the Deployment that collects state metrics from KSM (kube-state-metrics). |
| ksm.affinity | object | Deployed in the same node as KSM | Affinity for the KSM scraper deployment. |
| ksm.config.retries | int | `3` | Number of retries after timeout expired |
| ksm.config.timeout | string | `"10s"` | Timeout for the ksm API contacted by the integration |
| ksm.enabled | bool | `true` | Enable cluster state monitoring. Advanced users only. Setting this to `false` is not supported and will break the New Relic experience. |
| ksm.resources | object | 100m/150M -/850M | Resources for the KSM scraper pod. Keep in mind that sharding is not supported at the moment, so memory usage for this component ramps up quickly on large clusters. |
| ksm.tolerations | list | Schedules in all tainted nodes | Tolerations for the KSM scraper deployment. |
| kubelet | object | See `values.yaml` | Configuration for the DaemonSet that collects metrics from the Kubelet. |
| kubelet.config.retries | int | `3` | Number of retries after timeout expired |
| kubelet.config.timeout | string | `"10s"` | Timeout for the kubelet APIs contacted by the integration |
| kubelet.enabled | bool | `true` | Enable kubelet monitoring. Advanced users only. Setting this to `false` is not supported and will break the New Relic experience. |
| kubelet.tolerations | list | Schedules in all tainted nodes | Tolerations for the kubelet DaemonSet. |
| labels | object | `{}` | Additional labels for chart objects. Can be configured also with `global.labels` |
| licenseKey | string | `""` | The license key to use. Can be configured also with `global.licenseKey` |
| lowDataMode | bool | `false` (See [Low data mode](README.md#low-data-mode)) | Send less data by incrementing the interval from `15s` (the default when `lowDataMode` is `false` or `nil`) to `30s`. Non-nil values of `common.config.interval` will override this value. |
| nameOverride | string | `""` | Override the name of the chart |
| nodeSelector | object | `{}` | Sets pod's node selector almost globally. (See [Affinities and tolerations](README.md#affinities-and-tolerations)) |
| nrStaging | bool | `false` | Send the metrics to the staging backend. Requires a valid staging license key. Can be configured also with `global.nrStaging` |
| podAnnotations | object | `{}` | Annotations to be added to all pods created by the integration. |
| podLabels | object | `{}` | Additional labels for chart pods. Can be configured also with `global.podLabels` |
| podSecurityContext | object | `{}` | Sets security context (at pod level). Can be configured also with `global.podSecurityContext` |
| priorityClassName | string | `""` | Sets pod's priorityClassName. Can be configured also with `global.priorityClassName` |
| privileged | bool | `true` | Run the integration with full access to the host filesystem and network. Running in this mode allows reporting fine-grained cpu, memory, process and network metrics for your nodes. |
| proxy | string | `""` | Configures the integration to send all HTTP/HTTPS request through the proxy in that URL. The URL should have a standard format like `https://user:password@hostname:port`. Can be configured also with `global.proxy` |
| rbac | object | `{"create":true,"pspEnabled":false}` | Settings controlling RBAC objects creation. |
| rbac.create | bool | `true` | Whether the chart should automatically create the RBAC objects required to run. |
| rbac.pspEnabled | bool | `false` | Whether the chart should create Pod Security Policy objects. |
| serviceAccount | object | See `values.yaml` | Settings controlling ServiceAccount creation. |
| serviceAccount.create | bool | `true` | Whether the chart should automatically create the ServiceAccount objects required to run. |
| tolerations | list | `[]` | Sets pod's tolerations to node taints almost globally. (See [Affinities and tolerations](README.md#affinities-and-tolerations)) |
| updateStrategy | object | See `values.yaml` | Update strategy for the DaemonSets deployed. |
| verboseLog | bool | `false` | Sets the debug logs to this integration or all integrations if it is set globally. Can be configured also with `global.verboseLog` |
## Maintainers
* [alvarocabanas](https://github.com/alvarocabanas)
* [carlossscastro](https://github.com/carlossscastro)
* [sigilioso](https://github.com/sigilioso)
* [gsanchezgavier](https://github.com/gsanchezgavier)
* [kang-makes](https://github.com/kang-makes)
* [marcsanmi](https://github.com/marcsanmi)
* [paologallinaharbur](https://github.com/paologallinaharbur)
* [roobre](https://github.com/roobre)
## Past Contributors
Previous iterations of this chart started as a community project in the [stable Helm chart repository](https://github.com/helm/charts/). New Relic is very thankful for all the 15+ community members that contributed and helped maintain the chart there over the years:
* coreypobrien
* sstarcher
* jmccarty3
* slayerjain
* ryanhope2
* rk295
* michaelajr
* isindir
* idirouhab
* ismferd
* enver
* diclophis
* jeffdesc
* costimuraru
* verwilst
* ezelenka

View File

@ -0,0 +1,139 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.badgesSection" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
# Helm installation
You can install this chart using [`nri-bundle`](https://github.com/newrelic/helm-charts/tree/master/charts/nri-bundle) located in the
[helm-charts repository](https://github.com/newrelic/helm-charts) or directly from this repository by adding this Helm repository:
```shell
helm repo add nri-kubernetes https://newrelic.github.io/nri-kubernetes
helm upgrade --install newrelic-infrastructure nri-kubernetes/newrelic-infrastructure -f your-custom-values.yaml
```
{{ template "chart.sourcesSection" . }}
## Values managed globally
This chart implements the [New Relic's common Helm library](https://github.com/newrelic/helm-charts/tree/master/library/common-library) which
means that it honors a wide range of defaults and globals common to most New Relic Helm charts.
Options that can be defined globally include `affinity`, `nodeSelector`, `tolerations`, `proxy` and others. The full list can be found at
[user's guide of the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md).
## Chart particularities
### Low data mode
There are two mechanisms to reduce the amount of data that this integration sends to New Relic. See this snippet from the `values.yaml` file:
```yaml
common:
config:
interval: 15s
lowDataMode: false
```
The `lowDataMode` toggle is the simplest way to reduce the amount of data sent to New Relic. Setting it to `true` changes the default scrape interval from 15 seconds
(the default) to 30 seconds.
If you need for some reason to fine-tune the number of seconds you can use `common.config.interval` directly. If you take a look at the `values.yaml`
file, the value there is `nil`. If any value is set there, the `lowDataMode` toggle is ignored as this value takes precedence.
Setting this interval above 40 seconds can make you experience issues with the Kubernetes Cluster Explorer so this chart limits setting the interval
inside the range of 10 to 40 seconds.
### Affinities and tolerations
The New Relic common library allows to set affinities, tolerations, and node selectors globally using e.g. `.global.affinity` to ease the configuration
when you use this chart using `nri-bundle`. This chart has an extra level of granularity to the components that it deploys:
control plane, ksm, and kubelet.
Take this snippet as an example:
```yaml
global:
affinity: {}
affinity: {}
kubelet:
affinity: {}
ksm:
affinity: {}
controlPlane:
affinity: {}
```
The order to set an affinity is to set first any `kubelet.affinity`, `ksm.affinity`, or `controlPlane.affinity`. If these values are empty the chart
fallbacks to `affinity` (at root level), and if that value is empty, the chart fallbacks to `global.affinity`.
The same procedure applies to `nodeSelector` and `tolerations`.
On the other hand, some components have affinities and tolerations predefined e.g. to be able to run kubelet pods on nodes that are tainted as master
nodes or to schedule the KSM scraper on the same node of KSM to reduce the inter-node traffic.
If you are having problems assigning pods to nodes it may be because of this. Take a look at the [`values.yaml`](values.yaml) to see if the pod that is
not having your expected behavior has any predefined value.
### `hostNetwork` toggle
In versions below v3, changing the `privileged` mode affected the `hostNetwork`. We changed this behavior and now you can set pods to use `hostNetwork`
using the corresponding [flags from the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md)
(`.global.hostNetwork` and `.hostNetwork`) but the component that scrapes data from the control plane has always set `hostNetwork` enabled by default
(Look in the [`values.yaml`](values.yaml) for `controlPlane.hostNetwork: true`)
This is because the most common configuration of the control plane components is to be configured to listen only to `localhost`.
If your cluster security policy does not allow the use of `hostNetwork`, you can disable control plane monitoring by setting `controlPlane.enabled` to
`false`.
### `privileged` toggle
The default value for `privileged` [from the common library](https://github.com/newrelic/helm-charts/blob/master/library/common-library/README.md) is
`false`, but in this particular chart it is set to `true` (Look in the [`values.yaml`](values.yaml) for `privileged: true`)
This is because `kubelet` pods need to run in privileged mode to fetch cpu, memory, process, and network metrics of your nodes.
If your cluster security policy does not allow to have `privileged` in your pod' security context, you can disable it by setting `privileged` to
`false` taking into account that you will lose all the metrics from the host and some metadata from the host that are added to the metrics of the
integrations that you have configured.
{{ template "chart.valuesSection" . }}
{{ if .Maintainers }}
## Maintainers
{{ range .Maintainers }}
{{- if .Name }}
{{- if .Url }}
* [{{ .Name }}]({{ .Url }})
{{- else }}
* {{ .Name }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
## Past Contributors
Previous iterations of this chart started as a community project in the [stable Helm chart repository](https://github.com/helm/charts/). New Relic is very thankful for all the 15+ community members that contributed and helped maintain the chart there over the years:
* coreypobrien
* sstarcher
* jmccarty3
* slayerjain
* ryanhope2
* rk295
* michaelajr
* isindir
* idirouhab
* ismferd
* enver
* diclophis
* jeffdesc
* costimuraru
* verwilst
* ezelenka

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,25 @@
apiVersion: v2
description: Provides helpers to provide consistency on all the charts
keywords:
- newrelic
- chart-library
maintainers:
- name: alvarocabanas
url: https://github.com/alvarocabanas
- name: carlossscastro
url: https://github.com/carlossscastro
- name: gsanchezgavier
url: https://github.com/gsanchezgavier
- name: kang-makes
url: https://github.com/kang-makes
- name: marcsanmi
url: https://github.com/marcsanmi
- name: paologallinaharbur
url: https://github.com/paologallinaharbur
- name: roobre
url: https://github.com/roobre
- name: sigilioso
url: https://github.com/sigilioso
name: common-library
type: library
version: 1.0.2

View File

@ -0,0 +1,10 @@
{{- /* Renders the pod affinity, preferring the chart-local `.affinity` over the global `.global.affinity`. */ -}}
{{- define "newrelic.common.affinity" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.affinity | default $global.affinity -}}
{{- toYaml . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,25 @@
{{/*
Returns, as YAML, the agent configuration defaults that all New Relic agents
should share. Each key is emitted only when the corresponding toggle/helper
resolves to a non-empty value, so the output can be merged with chart-specific
agent configuration.
*/}}
{{- define "newrelic.common.agentConfig.defaults" -}}
{{- /* Verbose logging, expressed as the agent's integer flag. */ -}}
{{- if include "newrelic.common.verboseLog" . }}
verbose: 1
{{- end }}
{{- /* Point the agent at the New Relic staging environment when enabled. */ -}}
{{- if (include "newrelic.common.nrStaging" . ) }}
staging: true
{{- end }}
{{- with include "newrelic.common.proxy" . }}
proxy: {{ . | quote }}
{{- end }}
{{- with include "newrelic.common.fedramp.enabled" . }}
fedramp: {{ . }}
{{- end }}
{{- /* Global and local customAttributes merged by the helper (local keys win). */ -}}
{{- with fromYaml ( include "newrelic.common.customAttributes" . ) }}
custom_attributes:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,15 @@
{{/*
Return the cluster name, preferring the chart-local `.cluster` value over the
global `.global.cluster` one. Fails the render when neither is set, since the
integrations cannot report data without a cluster name.
*/}}
{{- define "newrelic.common.cluster" -}}
{{- /* This allows us to use `$global` as an empty dict directly in case `Values.global` does not exist */ -}}
{{- $global := index .Values "global" | default dict -}}
{{- if .Values.cluster -}}
{{- .Values.cluster -}}
{{- else if $global.cluster -}}
{{- $global.cluster -}}
{{- else -}}
{{- /* Previous message was garbled ("There is not ... set neither in"); reworded for clarity. */ -}}
{{ fail "There is no cluster name set in `.cluster' nor in `.global.cluster' in your values.yaml. A cluster name is required." }}
{{- end -}}
{{- end -}}
View File

@ -0,0 +1,17 @@
{{/*
Renders the merged set of custom attributes as YAML, ready to be templated or
parsed back with `fromYaml`. Global attributes are applied first and
chart-local ones second, so local keys overwrite global ones on conflict.
*/}}
{{- define "newrelic.common.customAttributes" -}}
{{- $global := index .Values "global" | default dict -}}
{{- $globalAttributes := $global.customAttributes | default dict -}}
{{- $localAttributes := .Values.customAttributes | default dict -}}
{{- /* Merge into a fresh dict so neither source map is mutated. */ -}}
{{- mergeOverwrite (dict) $globalAttributes $localAttributes | toYaml -}}
{{- end -}}

View File

@ -0,0 +1,10 @@
{{- /* Renders the Pod dnsConfig, preferring the chart-local `.dnsConfig` over the global `.global.dnsConfig`. */ -}}
{{- define "newrelic.common.dnsConfig" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.dnsConfig | default $global.dnsConfig -}}
{{- toYaml . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,25 @@
{{- /* Returns the fedRAMP flag. The chart-local `.fedramp.enabled` takes precedence
over the global `.global.fedramp.enabled`; renders nothing (Helm-falsy "") when disabled. */ -}}
{{- define "newrelic.common.fedramp.enabled" -}}
{{- if .Values.fedramp -}}
{{- if .Values.fedramp.enabled -}}
{{- .Values.fedramp.enabled -}}
{{- end -}}
{{- else if .Values.global -}}
{{- if .Values.global.fedramp -}}
{{- if .Values.global.fedramp.enabled -}}
{{- .Values.global.fedramp.enabled -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* Returns the FedRAMP value as a literal "true"/"false" string, ready to be templated directly into a field. */ -}}
{{- define "newrelic.common.fedramp.enabled.value" -}}
{{- if include "newrelic.common.fedramp.enabled" . -}}
true
{{- else -}}
false
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,39 @@
{{- /*
Abstraction of the hostNetwork toggle.
This helper allows to override the global `.global.hostNetwork` with the value of `.hostNetwork`.
Returns "true" if `hostNetwork` is enabled, otherwise "" (empty string), so the
include result can be used directly as a Helm truthiness test.
*/ -}}
{{- define "newrelic.common.hostNetwork" -}}
{{- /* This allows us to use `$global` as an empty dict directly in case `Values.global` does not exist */ -}}
{{- $global := index .Values "global" | default dict -}}
{{- /*
`get` will return "" (empty string) if value is not found, and the value otherwise, so we can type-assert with kindIs
We also want only to return when this is true, returning `false` here will template "false" (string) when doing
an `(include "newrelic.common.hostNetwork" .)`, which is not an "empty string" so it is `true` if it is used
as an evaluation somewhere else.
*/ -}}
{{- if get .Values "hostNetwork" | kindIs "bool" -}}
{{- if .Values.hostNetwork -}}
{{- .Values.hostNetwork -}}
{{- end -}}
{{- else if get $global "hostNetwork" | kindIs "bool" -}}
{{- if $global.hostNetwork -}}
{{- $global.hostNetwork -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /*
Abstraction of the hostNetwork toggle.
This helper abstracts the function "newrelic.common.hostNetwork" to return true or false directly,
as literal strings, ready to be templated into a manifest field.
*/ -}}
{{- define "newrelic.common.hostNetwork.value" -}}
{{- if include "newrelic.common.hostNetwork" . -}}
true
{{- else -}}
false
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,85 @@
{{- /*
Return the proper image name, quoted, as "registry/repository:tag" (or
"repository:tag" when no registry is configured).
{{ include "newrelic.common.images.image" ( dict "imageRoot" .Values.path.to.the.image "context" .) }}
*/ -}}
{{- define "newrelic.common.images.image" -}}
{{- $registryName := include "newrelic.common.images.registry" ( dict "imageRoot" .imageRoot "context" .context) -}}
{{- $repositoryName := include "newrelic.common.images.repository" .imageRoot -}}
{{- $tag := include "newrelic.common.images.tag" ( dict "imageRoot" .imageRoot "context" .context) -}}
{{- /* The registry is optional: skip the "registry/" prefix when none is set. */ -}}
{{- if $registryName -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag | quote -}}
{{- else -}}
{{- printf "%s:%s" $repositoryName $tag | quote -}}
{{- end -}}
{{- end -}}
{{- /*
Return the proper image registry. The image's own `registry` wins over the
global `.global.image.registry`; renders nothing when neither is set.
{{ include "newrelic.common.images.registry" ( dict "imageRoot" .Values.path.to.the.image "context" .) }}
*/ -}}
{{- define "newrelic.common.images.registry" -}}
{{- if .imageRoot.registry -}}
{{- .imageRoot.registry -}}
{{- else if .context.Values.global -}}
{{- if .context.Values.global.image -}}
{{- with .context.Values.global.image.registry -}}
{{- . -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /*
Return the proper image repository (no global fallback for this field).
{{ include "newrelic.common.images.repository" .Values.path.to.the.image }}
*/ -}}
{{- define "newrelic.common.images.repository" -}}
{{- .repository -}}
{{- end -}}
{{- /*
Return the proper image tag, falling back to the chart's appVersion.
{{ include "newrelic.common.images.tag" ( dict "imageRoot" .Values.path.to.the.image "context" .) }}
*/ -}}
{{- define "newrelic.common.images.tag" -}}
{{- .imageRoot.tag | default .context.Chart.AppVersion | toString -}}
{{- end -}}
{{- /*
Return the proper Image Pull Registry Secret Names evaluating values as templates.
Global pull secrets are listed first, then the chart-local ones; the flattened
list is rendered as YAML, or nothing when empty.
{{ include "newrelic.common.images.renderPullSecrets" ( dict "pullSecrets" (list .Values.path.to.the.image.pullSecrets1, .Values.path.to.the.image.pullSecrets2) "context" .) }}
*/ -}}
{{- define "newrelic.common.images.renderPullSecrets" -}}
{{- $flatlist := list }}
{{- if .context.Values.global -}}
{{- if .context.Values.global.image -}}
{{- if .context.Values.global.image.pullSecrets -}}
{{- range .context.Values.global.image.pullSecrets -}}
{{- $flatlist = append $flatlist . -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* Each entry of .pullSecrets is itself a list; flatten them, skipping empties. */ -}}
{{- range .pullSecrets -}}
{{- if not (empty .) -}}
{{- range . -}}
{{- $flatlist = append $flatlist . -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if $flatlist -}}
{{- toYaml $flatlist -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,54 @@
{{/*
This will render the labels that should be used in all the manifests used by the helm chart.
Base labels (helm.sh/chart, app.kubernetes.io/managed-by, selector labels, and
app.kubernetes.io/version when appVersion is set) are merged with the global
and chart-local user labels; user labels overwrite base ones, local over global.
*/}}
{{- define "newrelic.common.labels" -}}
{{- $global := index .Values "global" | default dict -}}
{{- $chart := dict "helm.sh/chart" (include "newrelic.common.naming.chart" . ) -}}
{{- $managedBy := dict "app.kubernetes.io/managed-by" .Release.Service -}}
{{- $selectorLabels := fromYaml (include "newrelic.common.labels.selectorLabels" . ) -}}
{{- $labels := mustMergeOverwrite $chart $managedBy $selectorLabels -}}
{{- if .Chart.AppVersion -}}
{{- $labels = mustMergeOverwrite $labels (dict "app.kubernetes.io/version" .Chart.AppVersion) -}}
{{- end -}}
{{- $globalUserLabels := $global.labels | default dict -}}
{{- $localUserLabels := .Values.labels | default dict -}}
{{- $labels = mustMergeOverwrite $labels $globalUserLabels $localUserLabels -}}
{{- toYaml $labels -}}
{{- end -}}
{{/*
This will render the labels that should be used in deployments/daemonsets template pods as a selector.
Only name and instance are used: selector labels must stay stable across upgrades.
*/}}
{{- define "newrelic.common.labels.selectorLabels" -}}
{{- $name := dict "app.kubernetes.io/name" ( include "newrelic.common.naming.name" . ) -}}
{{- $instance := dict "app.kubernetes.io/instance" .Release.Name -}}
{{- $selectorLabels := mustMergeOverwrite $name $instance -}}
{{- toYaml $selectorLabels -}}
{{- end }}
{{/*
Pod labels: the selector labels merged with global and chart-local `podLabels`
(local keys overwrite global, user keys overwrite selector labels).
*/}}
{{- define "newrelic.common.labels.podLabels" -}}
{{- $selectorLabels := fromYaml (include "newrelic.common.labels.selectorLabels" . ) -}}
{{- $global := index .Values "global" | default dict -}}
{{- $globalPodLabels := $global.podLabels | default dict }}
{{- $localPodLabels := .Values.podLabels | default dict }}
{{- $podLabels := mustMergeOverwrite $selectorLabels $globalPodLabels $localPodLabels -}}
{{- toYaml $podLabels -}}
{{- end }}

View File

@ -0,0 +1,55 @@
{{/*
Return the name of the secret holding the License Key.
Uses the user-provided custom secret name when set, otherwise the chart-managed
"<fullname>-license" secret rendered by "newrelic.common.license.secret".
*/}}
{{- define "newrelic.common.license.secretName" -}}
{{ include "newrelic.common.license._customSecretName" . | default (printf "%s-license" (include "newrelic.common.naming.fullname" . )) }}
{{- end -}}
{{/*
Return the name key for the License Key inside the secret (defaults to "licenseKey").
*/}}
{{- define "newrelic.common.license.secretKeyName" -}}
{{ include "newrelic.common.license._customSecretKey" . | default "licenseKey" }}
{{- end -}}
{{/*
Return local licenseKey if set, global otherwise.
This helper is for internal use.
*/}}
{{- define "newrelic.common.license._licenseKey" -}}
{{- if .Values.licenseKey -}}
{{- .Values.licenseKey -}}
{{- else if .Values.global -}}
{{- if .Values.global.licenseKey -}}
{{- .Values.global.licenseKey -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the name of the secret holding the License Key.
Local customSecretName takes precedence over the global one.
This helper is for internal use.
*/}}
{{- define "newrelic.common.license._customSecretName" -}}
{{- if .Values.customSecretName -}}
{{- .Values.customSecretName -}}
{{- else if .Values.global -}}
{{- if .Values.global.customSecretName -}}
{{- .Values.global.customSecretName -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return the name key for the License Key inside the secret.
Local customSecretLicenseKey takes precedence over the global one.
This helper is for internal use.
*/}}
{{- define "newrelic.common.license._customSecretKey" -}}
{{- if .Values.customSecretLicenseKey -}}
{{- .Values.customSecretLicenseKey -}}
{{- else if .Values.global -}}
{{- if .Values.global.customSecretLicenseKey }}
{{- .Values.global.customSecretLicenseKey -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,21 @@
{{/*
Renders the license key secret if user has not specified a custom secret.
When a custom secret name is configured, nothing is rendered at all; otherwise
the render fails unless a licenseKey has been provided (locally or globally).
*/}}
{{- define "newrelic.common.license.secret" }}
{{- if not (include "newrelic.common.license._customSecretName" .) }}
{{- /* Fail if licenseKey is empty and required: */ -}}
{{- if not (include "newrelic.common.license._licenseKey" .) }}
{{- fail "You must specify a licenseKey or a customSecretName containing it" }}
{{- end }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "newrelic.common.license.secretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "newrelic.common.labels" . | nindent 4 }}
data:
{{ include "newrelic.common.license.secretKeyName" . }}: {{ include "newrelic.common.license._licenseKey" . | b64enc }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,26 @@
{{- /*
Abstraction of the lowDataMode toggle.
This helper allows to override the global `.global.lowDataMode` with the value of `.lowDataMode`:
the chart-local boolean, when present, wins over the global one.
Returns "true" if `lowDataMode` is enabled, otherwise "" (empty string)
*/ -}}
{{- define "newrelic.common.lowDataMode" -}}
{{- /* `get` will return "" (empty string) if value is not found, and the value otherwise, so we can type-assert with kindIs */ -}}
{{- if (get .Values "lowDataMode" | kindIs "bool") -}}
{{- if .Values.lowDataMode -}}
{{- /*
We want only to return when this is true, returning `false` here will template "false" (string) when doing
an `(include "newrelic.common.lowDataMode" .)`, which is not an "empty string" so it is `true` if it is used
as an evaluation somewhere else.
*/ -}}
{{- .Values.lowDataMode -}}
{{- end -}}
{{- else -}}
{{- /* This allows us to use `$global` as an empty dict directly in case `Values.global` does not exist */ -}}
{{- $global := index .Values "global" | default dict -}}
{{- if get $global "lowDataMode" | kindIs "bool" -}}
{{- if $global.lowDataMode -}}
{{- $global.lowDataMode -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,73 @@
{{/*
This is a function to be called directly with a string just to truncate strings to
63 chars because some Kubernetes name fields are limited to that.
NOTE: the "trucate" misspelling is part of this helper's public name; other charts
include it by this exact name (e.g. "nriKubernetes.naming.fullname"), so it must
not be renamed.
*/}}
{{- define "newrelic.common.naming.trucateToDNS" -}}
{{- . | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- /*
Given a name and a suffix returns a 'DNS Valid' which always include the suffix, truncating the name if needed.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If suffix is too long it gets truncated but it always takes precedence over name, so a 63 chars suffix would suppress the name.
Usage:
{{ include "newrelic.common.naming.truncateToDNSWithSuffix" ( dict "name" "<my-name>" "suffix" "my-suffix" ) }}
*/ -}}
{{- define "newrelic.common.naming.truncateToDNSWithSuffix" -}}
{{- $suffix := (include "newrelic.common.naming.trucateToDNS" .suffix) -}}
{{- /* Budget is 62 = 63 minus the joining hyphen; the previous `sub 63` allowed the
combined "<name>-<suffix>" to reach 64 chars. `max 0` keeps `trunc` from receiving
a negative length when the suffix alone fills the whole budget. */ -}}
{{- $maxLen := (max 0 (sub 62 (len $suffix))) -}}
{{- $newName := .name | trunc ($maxLen | int) | trimSuffix "-" -}}
{{- if $newName -}}
{{- printf "%s-%s" $newName $suffix -}}
{{- else -}}
{{ $suffix }}
{{- end -}}
{{- end -}}
{{/*
Expand the name of the chart.
Uses the Chart name by default if nameOverride is not set.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "newrelic.common.naming.name" -}}
{{- $name := .Values.nameOverride | default .Chart.Name -}}
{{- include "newrelic.common.naming.trucateToDNS" $name -}}
{{- end }}
{{/*
Create a default fully qualified app name.
By default the full name will be "<release_name>" if it already contains the chart
name; otherwise it will be concatenated like "<release_name>-<chart_name>". This
could change if fullnameOverride or nameOverride are set.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "newrelic.common.naming.fullname" -}}
{{- $name := include "newrelic.common.naming.name" . -}}
{{- if .Values.fullnameOverride -}}
{{- $name = .Values.fullnameOverride -}}
{{- else if not (contains $name .Release.Name) -}}
{{- $name = printf "%s-%s" .Release.Name $name -}}
{{- end -}}
{{- include "newrelic.common.naming.trucateToDNS" $name -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
This function should not be used for naming objects. Use "common.naming.{name,fullname}" instead.
*/}}
{{- define "newrelic.common.naming.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end }}

View File

@ -0,0 +1,10 @@
{{- /* Renders the Pod nodeSelector, preferring the chart-local `.nodeSelector` over the global `.global.nodeSelector`. */ -}}
{{- define "newrelic.common.nodeSelector" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.nodeSelector | default $global.nodeSelector -}}
{{- toYaml . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,10 @@
{{- /* Renders the pod priorityClassName, preferring the chart-local value over the global one. */ -}}
{{- define "newrelic.common.priorityClassName" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.priorityClassName | default $global.priorityClassName -}}
{{- . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,28 @@
{{- /*
This is a helper that returns whether the chart should assume the user is fine deploying privileged pods.
The chart-local `.privileged` boolean, when present, wins over the global `.global.privileged`.
Returns "true" when enabled, otherwise "" (empty string, Helm-falsy).
*/ -}}
{{- define "newrelic.common.privileged" -}}
{{- /* This allows us to use `$global` as an empty dict directly in case `Values.global` does not exist. */ -}}
{{- $global := index .Values "global" | default dict -}}
{{- /* `get` will return "" (empty string) if value is not found, and the value otherwise, so we can type-assert with kindIs */ -}}
{{- if get .Values "privileged" | kindIs "bool" -}}
{{- if .Values.privileged -}}
{{- .Values.privileged -}}
{{- end -}}
{{- else if get $global "privileged" | kindIs "bool" -}}
{{- if $global.privileged -}}
{{- $global.privileged -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* Return directly "true" or "false" based on the output of "newrelic.common.privileged" */ -}}
{{- define "newrelic.common.privileged.value" -}}
{{- if include "newrelic.common.privileged" . -}}
true
{{- else -}}
false
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,10 @@
{{- /* Returns the proxy URL, preferring the chart-local `.proxy` over the global `.global.proxy`. */ -}}
{{- define "newrelic.common.proxy" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.proxy | default $global.proxy -}}
{{- . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,23 @@
{{- /* Renders the container securityContext; the chart-local value wins over the global one. */ -}}
{{- define "newrelic.common.securityContext.container" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.containerSecurityContext | default $global.containerSecurityContext -}}
{{- toYaml . -}}
{{- end -}}
{{- end -}}
{{- /* Renders the pod securityContext; the chart-local value wins over the global one. */ -}}
{{- define "newrelic.common.securityContext.pod" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.podSecurityContext | default $global.podSecurityContext -}}
{{- toYaml . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,90 @@
{{- /* Defines if the service account has to be created or not.
Returns "true" (Helm-truthy) when it should be created, "" otherwise; when neither
the local nor the global values define `serviceAccount.create`, defaults to "true". */ -}}
{{- define "newrelic.common.serviceAccount.create" -}}
{{- $valueFound := false -}}
{{- /* Look for a chart-local `.serviceAccount.create` first (it wins over the global one) */ -}}
{{- if get .Values "serviceAccount" | kindIs "map" -}}
{{- if (get .Values.serviceAccount "create" | kindIs "bool") -}}
{{- $valueFound = true -}}
{{- if .Values.serviceAccount.create -}}
{{- /*
We want only to return when this is true, returning `false` here will template "false" (string) when doing
an `(include "newrelic.common.serviceAccount.create" .)`, which is not an "empty string" so it is `true` if it is used
as an evaluation somewhere else.
*/ -}}
{{- .Values.serviceAccount.create -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* Fall back to the global `.global.serviceAccount.create` when no local value was set */ -}}
{{- if not $valueFound -}}
{{- /* This allows us to use `$global` as an empty dict directly in case `Values.global` does not exist */ -}}
{{- $global := index .Values "global" | default dict -}}
{{- if get $global "serviceAccount" | kindIs "map" -}}
{{- if get $global.serviceAccount "create" | kindIs "bool" -}}
{{- $valueFound = true -}}
{{- if $global.serviceAccount.create -}}
{{- $global.serviceAccount.create -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* In case no serviceAccount value has been found, default to "true" */ -}}
{{- if not $valueFound -}}
true
{{- end -}}
{{- end -}}
{{- /* Defines the name of the service account: explicit local name, then explicit global
name, then the chart fullname when the account is created, or "default" otherwise. */ -}}
{{- define "newrelic.common.serviceAccount.name" -}}
{{- $localServiceAccount := "" -}}
{{- if get .Values "serviceAccount" | kindIs "map" -}}
{{- if (get .Values.serviceAccount "name" | kindIs "string") -}}
{{- $localServiceAccount = .Values.serviceAccount.name -}}
{{- end -}}
{{- end -}}
{{- $globalServiceAccount := "" -}}
{{- $global := index .Values "global" | default dict -}}
{{- if get $global "serviceAccount" | kindIs "map" -}}
{{- if get $global.serviceAccount "name" | kindIs "string" -}}
{{- $globalServiceAccount = $global.serviceAccount.name -}}
{{- end -}}
{{- end -}}
{{- if (include "newrelic.common.serviceAccount.create" .) -}}
{{- $localServiceAccount | default $globalServiceAccount | default (include "newrelic.common.naming.fullname" .) -}}
{{- else -}}
{{- $localServiceAccount | default $globalServiceAccount | default "default" -}}
{{- end -}}
{{- end -}}
{{- /* Merge the global and local annotations for the service account (local keys win);
renders nothing when the merged map is empty. */ -}}
{{- define "newrelic.common.serviceAccount.annotations" -}}
{{- $localServiceAccount := dict -}}
{{- if get .Values "serviceAccount" | kindIs "map" -}}
{{- if get .Values.serviceAccount "annotations" -}}
{{- $localServiceAccount = .Values.serviceAccount.annotations -}}
{{- end -}}
{{- end -}}
{{- $globalServiceAccount := dict -}}
{{- $global := index .Values "global" | default dict -}}
{{- if get $global "serviceAccount" | kindIs "map" -}}
{{- if get $global.serviceAccount "annotations" -}}
{{- $globalServiceAccount = $global.serviceAccount.annotations -}}
{{- end -}}
{{- end -}}
{{- $merged := mustMergeOverwrite $globalServiceAccount $localServiceAccount -}}
{{- if $merged -}}
{{- toYaml $merged -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,39 @@
{{- /*
Abstraction of the nrStaging toggle.
This helper allows to override the global `.global.nrStaging` with the value of `.nrStaging`:
the chart-local boolean, when present, wins over the global one.
Returns "true" if `nrStaging` is enabled, otherwise "" (empty string)
*/ -}}
{{- define "newrelic.common.nrStaging" -}}
{{- /* `get` will return "" (empty string) if value is not found, and the value otherwise, so we can type-assert with kindIs */ -}}
{{- if (get .Values "nrStaging" | kindIs "bool") -}}
{{- if .Values.nrStaging -}}
{{- /*
We want only to return when this is true, returning `false` here will template "false" (string) when doing
an `(include "newrelic.common.nrStaging" .)`, which is not an "empty string" so it is `true` if it is used
as an evaluation somewhere else.
*/ -}}
{{- .Values.nrStaging -}}
{{- end -}}
{{- else -}}
{{- /* This allows us to use `$global` as an empty dict directly in case `Values.global` does not exist */ -}}
{{- $global := index .Values "global" | default dict -}}
{{- if get $global "nrStaging" | kindIs "bool" -}}
{{- if $global.nrStaging -}}
{{- $global.nrStaging -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /*
Returns "true" or "false" directly instead of empty string (Helm falsiness) based on the output of "newrelic.common.nrStaging"
*/ -}}
{{- define "newrelic.common.nrStaging.value" -}}
{{- if include "newrelic.common.nrStaging" . -}}
true
{{- else -}}
false
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,10 @@
{{- /* Renders the Pod tolerations, preferring the chart-local `.tolerations` over the global `.global.tolerations`. */ -}}
{{- define "newrelic.common.tolerations" -}}
{{- $global := index .Values "global" | default dict -}}
{{- with .Values.tolerations | default $global.tolerations -}}
{{- toYaml . -}}
{{- end -}}
{{- end -}}
View File

@ -0,0 +1,54 @@
{{- /*
Abstraction of the verbose toggle.
This helper allows to override the global `.global.verboseLog` with the value of `.verboseLog`:
the chart-local boolean, when present, wins over the global one.
Returns "true" if `verbose` is enabled, otherwise "" (empty string)
*/ -}}
{{- define "newrelic.common.verboseLog" -}}
{{- /* `get` will return "" (empty string) if value is not found, and the value otherwise, so we can type-assert with kindIs */ -}}
{{- if (get .Values "verboseLog" | kindIs "bool") -}}
{{- if .Values.verboseLog -}}
{{- /*
We want only to return when this is true, returning `false` here will template "false" (string) when doing
an `(include "newrelic.common.verboseLog" .)`, which is not an "empty string" so it is `true` if it is used
as an evaluation somewhere else.
*/ -}}
{{- .Values.verboseLog -}}
{{- end -}}
{{- else -}}
{{- /* This allows us to use `$global` as an empty dict directly in case `Values.global` does not exist */ -}}
{{- $global := index .Values "global" | default dict -}}
{{- if get $global "verboseLog" | kindIs "bool" -}}
{{- if $global.verboseLog -}}
{{- $global.verboseLog -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /*
Abstraction of the verbose toggle.
This helper abstracts the function "newrelic.common.verboseLog" to return true or false directly,
as literal strings ready to be templated.
*/ -}}
{{- define "newrelic.common.verboseLog.valueAsBoolean" -}}
{{- if include "newrelic.common.verboseLog" . -}}
true
{{- else -}}
false
{{- end -}}
{{- end -}}
{{- /*
Abstraction of the verbose toggle.
This helper abstracts the function "newrelic.common.verboseLog" to return 1 or 0 directly,
for agents whose verbose flag is numeric.
*/ -}}
{{- define "newrelic.common.verboseLog.valueAsInt" -}}
{{- if include "newrelic.common.verboseLog" . -}}
1
{{- else -}}
0
{{- end -}}
{{- end -}}

View File

@ -0,0 +1 @@
# values are not needed for the library chart, however this file is still needed for helm lint to work.

View File

@ -0,0 +1,135 @@
global:
licenseKey: 1234567890abcdef1234567890abcdef12345678
cluster: test-cluster
common:
agentConfig:
# We set it in order for the kubelet to not crash when posting to the agent. Since the License_Key is
# not valid, the Identity API doesn't return an AgentID and the server from the Agent takes too long to respond
is_forward_only: true
config:
sink:
http:
timeout: 180s
customAttributes:
new: relic
loren: ipsum
# Disable KSM scraper as it is not enabled when testing this chart individually.
ksm:
enabled: false
# K8s DaemonSets update strategy.
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
enableProcessMetrics: "false"
serviceAccount:
create: true
podAnnotations:
annotation1: "annotation"
podLabels:
label1: "label"
securityContext:
runAsUser: 1000
runAsGroup: 2000
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: true
rbac:
create: true
pspEnabled: false
prefixDisplayNameWithCluster: false
useNodeNameAsDisplayName: true
integrations_config: []
kubelet:
enabled: true
annotations: {}
tolerations:
- operator: "Exists"
effect: "NoSchedule"
- operator: "Exists"
effect: "NoExecute"
extraEnv:
- name: ENV_VAR1
value: "var1"
- name: ENV_VAR2
value: "var2"
resources:
limits:
memory: 400M
requests:
cpu: 100m
memory: 180M
config:
scheme: "http"
controlPlane:
kind: Deployment
enabled: true
config:
etcd:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=etcd"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:4001
insecureSkipVerify: true
auth:
type: bearer
- url: http://localhost:2381
scheduler:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=kube-scheduler"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:10259
insecureSkipVerify: true
auth:
type: bearer
controllerManager:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=kube-controller-manager"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:10257
insecureSkipVerify: true
auth:
type: bearer
mtls:
secretName: secret-name
secretNamespace: default
apiServer:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=kube-apiserver"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:8443
insecureSkipVerify: true
auth:
type: bearer
mtls:
secretName: secret-name4
- url: http://localhost:8080
images:
integration:
tag: test
repository: e2e/nri-kubernetes

View File

@ -0,0 +1,134 @@
global:
licenseKey: 1234567890abcdef1234567890abcdef12345678
cluster: test-cluster
common:
agentConfig:
# We set it in order for the kubelet to not crash when posting to the agent. Since the License_Key is
# not valid, the Identity API doesn't return an AgentID and the server from the Agent takes too long to respond
is_forward_only: true
config:
sink:
http:
timeout: 180s
customAttributes:
new: relic
loren: ipsum
# Disable KSM scraper as it is not enabled when testing this chart individually.
ksm:
enabled: false
# K8s DaemonSets update strategy.
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
enableProcessMetrics: "false"
serviceAccount:
create: true
podAnnotations:
annotation1: "annotation"
podLabels:
label1: "label"
securityContext:
runAsUser: 1000
runAsGroup: 2000
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: true
rbac:
create: true
pspEnabled: false
prefixDisplayNameWithCluster: false
useNodeNameAsDisplayName: true
integrations_config: []
kubelet:
enabled: true
annotations: {}
tolerations:
- operator: "Exists"
effect: "NoSchedule"
- operator: "Exists"
effect: "NoExecute"
extraEnv:
- name: ENV_VAR1
value: "var1"
- name: ENV_VAR2
value: "var2"
resources:
limits:
memory: 400M
requests:
cpu: 100m
memory: 180M
config:
scheme: "http"
controlPlane:
enabled: true
config:
etcd:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=etcd"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:4001
insecureSkipVerify: true
auth:
type: bearer
- url: http://localhost:2381
scheduler:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=kube-scheduler"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:10259
insecureSkipVerify: true
auth:
type: bearer
controllerManager:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=kube-controller-manager"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:10257
insecureSkipVerify: true
auth:
type: bearer
mtls:
secretName: secret-name
secretNamespace: default
apiServer:
enabled: true
autodiscover:
- selector: "tier=control-plane,component=kube-apiserver"
namespace: kube-system
matchNode: true
endpoints:
- url: https://localhost:8443
insecureSkipVerify: true
auth:
type: bearer
mtls:
secretName: secret-name4
- url: http://localhost:8080
images:
integration:
tag: test
repository: e2e/nri-kubernetes

View File

@ -0,0 +1,131 @@
{{- /* Validate `common.config.interval`: it must be a seconds string ("NNs") between
$min and $max, unless the user explicitly opts out with `forceUnsupportedInterval`. */ -}}
{{- if not .Values.forceUnsupportedInterval }}
{{- $max := 40 }}
{{- $min := 10 }}
{{- /* NOTE(review): this branch rejects any value missing the "s" suffix, but reuses the
range message — presumably intentional to keep one message; confirm before changing. */ -}}
{{- if not (.Values.common.config.interval | hasSuffix "s") }}
{{ fail (printf "Interval must be between %ds and %ds" $min $max ) }}
{{- end }}
{{- if gt ( .Values.common.config.interval | trimSuffix "s" | int64 ) $max }}
{{ fail (printf "Intervals larger than %ds are not supported" $max) }}
{{- end }}
{{- if lt ( .Values.common.config.interval | trimSuffix "s" | int64 ) $min }}
{{ fail (printf "Intervals smaller than %ds are not supported" $min) }}
{{- end }}
{{- end }}
{{- if or (not .Values.ksm.enabled) (not .Values.kubelet.enabled) }}
Warning:
========
You have specified ksm or kubelet integration components as not enabled.
Those components are needed to have the full experience on NROne kubernetes explorer.
{{- end }}
{{- /* Warn when control plane scraping is enabled without hostNetwork: most control
plane components are only reachable over the node's loopback interface. */ -}}
{{- if and .Values.controlPlane.enabled (not (include "nriKubernetes.controlPlane.hostNetwork" .)) }}
Warning:
========
Most Control Plane components listen on the loopback address only, which is not reachable without `hostNetwork: true`.
Control plane autodiscovery might not work as expected.
You can enable hostNetwork for all pods by setting `global.hostNetwork` or `hostNetwork`, or only for the control
plane pods by setting `controlPlane.hostNetwork: true`. Alternatively, you can disable control plane monitoring altogether with
`controlPlane.enabled: false`.
{{- end }}
{{- if and (include "newrelic.fargate" .) .Values.kubelet.affinity }}
Warning:
========
You have specified both an EKS Fargate environment (global.fargate) and custom
nodeAffinity rules, so we couldn't automatically exclude the kubelet daemonSet from
Fargate nodes. In order for the integration to work, you MUST manually exclude
the daemonSet from Fargate nodes.
Please make sure your `values.yaml' contains a .kubelet.affinity.nodeAffinity that achieve the same effect as:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
{{- end }}
{{- if and .Values.nodeAffinity .Values.controlPlane.enabled }}
WARNING: `nodeAffinity` is deprecated
=====================================
We have applied the old `nodeAffinity` to KSM and Kubelet components, but *NOT* to the control plane component as it
might conflict with the default nodeSelector.
This shimming will be removed in the future, please convert your `nodeAffinity` item into:
`ksm.affinity.nodeAffinity`, `controlPlane.affinity.nodeAffinity`, and `kubelet.affinity.nodeAffinity`.
{{- end }}
{{- if and .Values.integrations_config }}
WARNING: `integrations_config` is deprecated
============================================
We have automatically translated `integrations_config` to the new format, but this shimming will be removed in the
future. Please migrate your configs to the new format in the `integrations` key.
{{- end }}
{{- if or .Values.kubeStateMetricsScheme .Values.kubeStateMetricsPort .Values.kubeStateMetricsUrl .Values.kubeStateMetricsPodLabel .Values.kubeStateMetricsNamespace }}
WARNING: `kubeStateMetrics*` are deprecated
===========================================
We have automatically translated your `kubeStateMetrics*` values to the new format, but this shimming will be removed in
the future. Please migrate your configs to the new format in the `ksm.config` key.
{{- end }}
{{- if .Values.runAsUser }}
WARNING: `runAsUser` is deprecated
==================================
We have automatically translated your `runAsUser` setting to the new format, but this shimming will be removed in the
future. Please migrate your configs to the new format in the `securityContext` key.
{{- end }}
{{- if .Values.config }}
WARNING: `config` is deprecated
===============================
We have automatically translated your `config` setting to the new format, but this shimming will be removed in the
future. Please migrate your agent config to the new format in the `common.agentConfig` key.
{{- end }}
{{ $errors:= "" }}
{{- if .Values.logFile }}
{{ $errors = printf "%s\n\n%s" $errors (include "newrelic.compatibility.message.logFile" . ) }}
{{- end }}
{{- if .Values.resources }}
{{ $errors = printf "%s\n\n%s" $errors (include "newrelic.compatibility.message.resources" . ) }}
{{- end }}
{{- if .Values.image }}
{{ $errors = printf "%s\n\n%s" $errors (include "newrelic.compatibility.message.image" . ) }}
{{- end }}
{{- if .Values.enableWindows }}
{{ $errors = printf "%s\n\n%s" $errors (include "newrelic.compatibility.message.windows" . ) }}
{{- end }}
{{- if ( or .Values.controllerManagerEndpointUrl .Values.schedulerEndpointUrl .Values.etcdEndpointUrl .Values.apiServerEndpointUrl )}}
{{ $errors = printf "%s\n\n%s" $errors (include "newrelic.compatibility.message.apiURL" . ) }}
{{- end }}
{{- if ( or .Values.etcdTlsSecretName .Values.etcdTlsSecretNamespace )}}
{{ $errors = printf "%s\n\n%s" $errors (include "newrelic.compatibility.message.etcdSecrets" . ) }}
{{- end }}
{{- if .Values.apiServerSecurePort }}
{{ $errors = printf "%s\n\n%s" $errors (include "newrelic.compatibility.message.apiServerSecurePort" . ) }}
{{- end }}
{{- if $errors | trim}}
{{- fail (printf "\n\n%s\n%s" (include "newrelic.compatibility.message.common" . ) $errors ) }}
{{- end }}

View File

@ -0,0 +1,118 @@
{{/*
Create a default fully qualified app name.
This is a copy of the common-library's name helper, kept here because the library's overriding system was broken.
Since this chart has to use "nrk8s" instead of `.Chart.Name`, we need to maintain our own version of the
fullname helper.
By default the full name will be "<release_name>" when the release name already contains "nrk8s"; otherwise
the two are concatenated as "<release_name>-nrk8s". This can change if fullnameOverride or
nameOverride are set.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "nriKubernetes.naming.fullname" -}}
{{- $name := .Values.nameOverride | default "nrk8s" -}}
{{- if .Values.fullnameOverride -}}
{{- $name = .Values.fullnameOverride -}}
{{- else if not (contains $name .Release.Name) -}}
{{- $name = printf "%s-%s" .Release.Name $name -}}
{{- end -}}
{{- /* NOTE(review): "trucateToDNS" looks misspelled, but it must match the helper name exported by the common library — confirm there before renaming. */ -}}
{{- include "newrelic.common.naming.trucateToDNS" $name -}}
{{- end -}}
{{- /* Naming helpers */ -}}
{{- /* Name for the secret-reading RBAC objects: "<fullname>-secrets", DNS-truncated. */ -}}
{{- define "nriKubernetes.naming.secrets" }}
{{- include "newrelic.common.naming.truncateToDNSWithSuffix" (dict "name" (include "nriKubernetes.naming.fullname" .) "suffix" "secrets") -}}
{{- end -}}
{{- /* Return a YAML with the mode to be added to the labels */ -}}
{{- /* "newrelic.common.privileged" returns a non-empty string when privileged mode is enabled. */ -}}
{{- define "nriKubernetes._mode" -}}
{{- if include "newrelic.common.privileged" . -}}
mode: privileged
{{- else -}}
mode: unprivileged
{{- end -}}
{{- end -}}
{{/*
Add `mode` label to the labels that come from the common library for all the objects
*/}}
{{- define "nriKubernetes.labels" -}}
{{- $labels := include "newrelic.common.labels" . | fromYaml -}}
{{- $mode := fromYaml ( include "nriKubernetes._mode" . ) -}}
{{- mustMergeOverwrite $labels $mode | toYaml -}}
{{- end -}}
{{/*
Add `mode` label to the labels that come from the common library for podLabels
*/}}
{{- define "nriKubernetes.labels.podLabels" -}}
{{- $labels := include "newrelic.common.labels.podLabels" . | fromYaml -}}
{{- $mode := fromYaml ( include "nriKubernetes._mode" . ) -}}
{{- mustMergeOverwrite $labels $mode | toYaml -}}
{{- end -}}
{{/*
Returns fargate
*/}}
{{- /* Precedence: chart-local .Values.fargate over .Values.global.fargate; output is empty when neither is set. */ -}}
{{- define "newrelic.fargate" -}}
{{- if .Values.fargate -}}
{{- .Values.fargate -}}
{{- else if .Values.global -}}
{{- if .Values.global.fargate -}}
{{- .Values.global.fargate -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* Default scrape interval for the integration: relaxed to 30s when lowDataMode is enabled. */ -}}
{{- define "newrelic.integrationConfigDefaults" -}}
{{- if include "newrelic.common.lowDataMode" . -}}
interval: 30s
{{- else -}}
interval: 15s
{{- end -}}
{{- end -}}
{{- /* These are the defaults that are used for all the containers in this chart (except the kubelet's agent) */ -}}
{{- define "nriKubernetes.securityContext.containerDefaults" -}}
runAsUser: 1000
runAsGroup: 2000
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
{{- end -}}
{{- /* Allow to change pod defaults dynamically based if we are running in privileged mode or not */ -}}
{{- /* Precedence: the common-library securityContext (when set) replaces the chart defaults; the legacy runAsUser shim then overrides whichever base was chosen. */ -}}
{{- define "nriKubernetes.securityContext.container" -}}
{{- $defaults := fromYaml ( include "nriKubernetes.securityContext.containerDefaults" . ) -}}
{{- $compatibilityLayer := include "newrelic.compatibility.securityContext" . | fromYaml -}}
{{- $commonLibrary := include "newrelic.common.securityContext.container" . | fromYaml -}}
{{- $finalSecurityContext := dict -}}
{{- if $commonLibrary -}}
{{- $finalSecurityContext = mustMergeOverwrite $commonLibrary $compatibilityLayer -}}
{{- else -}}
{{- $finalSecurityContext = mustMergeOverwrite $defaults $compatibilityLayer -}}
{{- end -}}
{{- toYaml $finalSecurityContext -}}
{{- end -}}

View File

@ -0,0 +1,199 @@
{{/*
Returns true if .Values.ksm.enabled is true and the legacy disableKubeStateMetrics is not set
*/}}
{{- define "newrelic.compatibility.ksm.enabled" -}}
{{- if and .Values.ksm.enabled (not .Values.disableKubeStateMetrics) -}}
true
{{- end -}}
{{- end -}}
{{/*
Returns legacy ksm values
*/}}
{{- /* Translates each legacy top-level `kubeStateMetrics*` value to its equivalent key of the new ksm config. */ -}}
{{- define "newrelic.compatibility.ksm.legacyData" -}}
enabled: true
{{- if .Values.kubeStateMetricsScheme }}
scheme: {{ .Values.kubeStateMetricsScheme }}
{{- end -}}
{{- if .Values.kubeStateMetricsPort }}
port: {{ .Values.kubeStateMetricsPort }}
{{- end -}}
{{- if .Values.kubeStateMetricsUrl }}
staticURL: {{ .Values.kubeStateMetricsUrl }}
{{- end -}}
{{- if .Values.kubeStateMetricsPodLabel }}
selector: {{ printf "%s=kube-state-metrics" .Values.kubeStateMetricsPodLabel }}
{{- end -}}
{{- if .Values.kubeStateMetricsNamespace }}
namespace: {{ .Values.kubeStateMetricsNamespace}}
{{- end -}}
{{- end -}}
{{/*
Returns the new value if available, otherwise falling back on the legacy one
*/}}
{{- /* Expects a dict with `supported` (new-format value) and `legacy` (old value); emits the chosen one as YAML. */ -}}
{{- define "newrelic.compatibility.valueWithFallback" -}}
{{- if .supported }}
{{- toYaml .supported}}
{{- else if .legacy -}}
{{- toYaml .legacy}}
{{- end }}
{{- end -}}
{{/*
Returns a dictionary with legacy runAsUser config
*/}}
{{- define "newrelic.compatibility.securityContext" -}}
{{- if .Values.runAsUser -}}
{{ dict "runAsUser" .Values.runAsUser | toYaml }}
{{- end -}}
{{- end -}}
{{/*
Returns legacy annotations if available
*/}}
{{- /* Legacy location was .Values.daemonSet.annotations; emits nothing when either level is absent. */ -}}
{{- define "newrelic.compatibility.annotations" -}}
{{- with .Values.daemonSet -}}
{{- with .annotations -}}
{{- toYaml . }}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Returns agent configmap merged with legacy config and legacy eventQueueDepth config
*/}}
{{- /* Prefers common.agentConfig over the legacy top-level `config`; a legacy `eventQueueDepth` is merged in as `event_queue_depth`. */ -}}
{{- define "newrelic.compatibility.agentConfig" -}}
{{ $config:= (include "newrelic.compatibility.valueWithFallback" (dict "legacy" .Values.config "supported" .Values.common.agentConfig ) | fromYaml )}}
{{- if .Values.eventQueueDepth -}}
{{- mustMergeOverwrite $config (dict "event_queue_depth" .Values.eventQueueDepth ) | toYaml }}
{{- else -}}
{{- $config | toYaml }}
{{- end -}}
{{- end -}}
{{- /*
Return a valid podSpec.affinity object from the old `.Values.nodeAffinity`.
*/ -}}
{{- define "newrelic.compatibility.nodeAffinity" -}}
{{- if .Values.nodeAffinity -}}
nodeAffinity:
  {{- toYaml .Values.nodeAffinity | nindent 2 }}
{{- end -}}
{{- end -}}
{{/*
Returns legacy integrations_config configmap data
*/}}
{{- /* Each list entry becomes a configmap key named `<entry.name>` whose value is `entry.data` rendered as a YAML block scalar. */ -}}
{{- define "newrelic.compatibility.integrations" -}}
{{- if .Values.integrations_config -}}
{{- range .Values.integrations_config }}
{{ .name -}}: |-
  {{- toYaml .data | nindent 2 }}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* Error-message bodies for unsupported v2 values; each is appended to the `$errors` accumulator in NOTES and rendered through `fail`. */ -}}
{{- define "newrelic.compatibility.message.logFile" -}}
The 'logFile' option is no longer supported and has been replaced by:
- common.agentConfig.log_file.
------
{{- end -}}
{{- define "newrelic.compatibility.message.resources" -}}
You have specified the legacy 'resources' option in your values, which is not fully compatible with the v3 version.
This version deploys three different components and therefore you'll need to specify resources for each of them.
Please use
- ksm.resources,
- controlPlane.resources,
- kubelet.resources.
------
{{- end -}}
{{- define "newrelic.compatibility.message.apiServerSecurePort" -}}
You have specified the legacy 'apiServerSecurePort' option in your values, which is not fully compatible with the v3
version.
Please configure the API Server port as a part of 'apiServer.autodiscover[].endpoints'
------
{{- end -}}
{{- define "newrelic.compatibility.message.windows" -}}
nri-kubernetes v3 does not support deploying into windows Nodes.
Please use the latest 2.x version of the chart.
------
{{- end -}}
{{- define "newrelic.compatibility.message.etcdSecrets" -}}
Values "etcdTlsSecretName" and "etcdTlsSecretNamespace" are no longer supported, please specify them as a part of the
'etcd' config in the values, for example:
- endpoints:
    - url: https://localhost:9979
      insecureSkipVerify: true
      auth:
        type: mTLS
        mtls:
          secretName: {{ .Values.etcdTlsSecretName | default "etcdTlsSecretName"}}
          secretNamespace: {{ .Values.etcdTlsSecretNamespace | default "etcdTlsSecretNamespace"}}
------
{{- end -}}
{{- /* NOTE(review): the values key used elsewhere in this chart is `controlPlane` (camelCase); the mention of 'controlplane' below may need aligning — confirm before changing user-facing text. */ -}}
{{- define "newrelic.compatibility.message.apiURL" -}}
Values "controllerManagerEndpointUrl", "etcdEndpointUrl", "apiServerEndpointUrl", "schedulerEndpointUrl" are no longer
supported, please specify them as a part of the 'controlplane' config in the values, for example
autodiscover:
  - selector: "tier=control-plane,component=etcd"
    namespace: kube-system
    matchNode: true
    endpoints:
      - url: https://localhost:4001
        insecureSkipVerify: true
        auth:
          type: bearer
------
{{- end -}}
{{- /* Error shown when the legacy v2 `image.*` values are set: v3 splits the workload across three separate images. */ -}}
{{- define "newrelic.compatibility.message.image" -}}
Configuring image repository and tag under 'image' is no longer supported.
The following values are no longer supported and are currently ignored:
- image.repository
- image.tag
- image.pullPolicy
- image.pullSecrets
Notice that the 3.x version of the integration uses 3 different images.
Please set:
- images.forwarder.* to configure the infrastructure-agent forwarder.
- images.agent.* to configure the image bundling the infrastructure-agent and on-host integrations.
- images.integration.* to configure the image in charge of scraping k8s data.
------
{{- end -}}
{{- /* Rendered via `fail` when customAttributes is still the legacy JSON-string form; the range below only produces the map example when that string parses as valid JSON. */ -}}
{{- define "newrelic.compatibility.message.customAttributes" -}}
We still support using custom attributes but we support it as a map and dropped it as a string.
customAttributes: {{ .Values.customAttributes | quote }}
You should change your values to something like this:
customAttributes:
{{- range $k, $v := fromJson .Values.customAttributes -}}
{{- $k | nindent 2 }}: {{ $v | quote }}
{{- end }}
**NOTE**: If you read above errors like "invalid character ':' after top-level value" or "json: cannot unmarshal string into Go value of type map[string]interface {}" means that the string you have in your values is not a valid JSON, Helm is not able to parse it and we could not show you how you should change it. Sorry.
{{- end -}}
{{- /* Header prepended to the accumulated compatibility errors right before aborting the render with `fail`. */ -}}
{{- define "newrelic.compatibility.message.common" -}}
######
The chart cannot be rendered since the values listed below are not supported. Please replace those with the new ones compatible with newrelic-infrastructure V3.
Keep in mind that the flag "--reuse-values" is not supported when migrating from V2 to V3.
Further information can be found in the official docs https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/changes-since-v3#migration-guide
######
{{- end -}}

View File

@ -0,0 +1,33 @@
{{- /* Cluster-wide read permissions for the integration's service account; only rendered when RBAC management is enabled. */ -}}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    {{- include "newrelic.common.labels" . | nindent 4 }}
  name: {{ include "newrelic.common.naming.fullname" . }}
rules:
  # Read node-level metrics and stats through the kubelet subresources.
  - apiGroups: [""]
    resources:
      - "nodes/metrics"
      - "nodes/stats"
      - "nodes/proxy"
    verbs: ["get", "list"]
  - apiGroups: [ "" ]
    resources:
      - "endpoints"
      - "services"
      - "nodes"
    verbs: [ "get", "list", "watch" ]
  - nonResourceURLs: ["/metrics"]
    verbs: ["get"]
  {{- if .Values.rbac.pspEnabled }}
  # Allow the workload to use the chart-provided PodSecurityPolicy.
  - apiGroups:
      - extensions
    resources:
      - podsecuritypolicies
    resourceNames:
      - privileged-{{ include "newrelic.common.naming.fullname" . }}
    verbs:
      - use
  {{- end -}}
{{- end -}}

View File

@ -0,0 +1,16 @@
{{- /* Binds the chart ClusterRole to the chart's service account. */ -}}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    {{- include "newrelic.common.labels" . | nindent 4 }}
  name: {{ include "newrelic.common.naming.fullname" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "newrelic.common.naming.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ include "newrelic.common.serviceAccount.name" . }}
    namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@ -0,0 +1,11 @@
{{- /*
As this chart deploys what should be three charts to keep the transition to v3 as smooth as possible,
it has three affinity settings, so a helper is needed per scraper.
*/ -}}
{{- /* The controlPlane-specific affinity takes precedence; otherwise fall back to the chart-wide common affinity. */ -}}
{{- define "nriKubernetes.controlPlane.affinity" -}}
{{- if .Values.controlPlane.affinity -}}
{{- toYaml .Values.controlPlane.affinity -}}
{{- else if include "newrelic.common.affinity" . -}}
{{- include "newrelic.common.affinity" . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,20 @@
{{- /*
Defaults for controlPlane's agent config
*/ -}}
{{- define "nriKubernetes.controlPlane.agentConfig.defaults" -}}
is_forward_only: true
overide_host_root: "" # Typo from here: https://github.com/newrelic/infrastructure-agent/blob/master/pkg/config/config.go#L267
http_server_enabled: true
http_server_port: 8001
{{- end -}}
{{- /* Final agent config for the controlPlane pods. Merge order (later wins): common agent defaults, controlPlane defaults above, user/legacy agentConfig, and the clusterName custom attribute. */ -}}
{{- define "nriKubernetes.controlPlane.agentConfig" -}}
{{- $agentDefaults := fromYaml ( include "newrelic.common.agentConfig.defaults" . ) -}}
{{- $controlPlane := fromYaml ( include "nriKubernetes.controlPlane.agentConfig.defaults" . ) -}}
{{- $agentConfig := fromYaml ( include "newrelic.compatibility.agentConfig" . ) -}}
{{- $customAttributes := dict "custom_attributes" (dict "clusterName" (include "newrelic.common.cluster" . )) -}}
{{- mustMergeOverwrite $agentDefaults $controlPlane $agentConfig $customAttributes | toYaml -}}
{{- end -}}

View File

@ -0,0 +1,22 @@
{{/* Returns whether the controlPlane scraper should run with hostNetwork: true based on the user configuration. */}}
{{- define "nriKubernetes.controlPlane.hostNetwork" -}}
{{- /* `get` will return "" (empty string) if value is not found, and the value otherwise, so we can type-assert with kindIs */ -}}
{{- /* NOTE: an explicit boolean in controlPlane.hostNetwork — even `false` — takes this branch and therefore overrides the common/global setting. */ -}}
{{- if get .Values.controlPlane "hostNetwork" | kindIs "bool" -}}
{{- if .Values.controlPlane.hostNetwork -}}
{{- .Values.controlPlane.hostNetwork -}}
{{- end -}}
{{- else if include "newrelic.common.hostNetwork" . -}}
{{- include "newrelic.common.hostNetwork" . -}}
{{- end -}}
{{- end -}}
{{/* Abstraction of "nriKubernetes.controlPlane.hostNetwork" that returns true or false directly */}}
{{- define "nriKubernetes.controlPlane.hostNetwork.value" -}}
{{- if include "nriKubernetes.controlPlane.hostNetwork" . -}}
true
{{- else -}}
false
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,8 @@
{{- /* Naming helpers */ -}}
{{- /* "<fullname>-controlplane": name for controlPlane scraper objects, DNS-truncated. */ -}}
{{- define "nriKubernetes.controlplane.fullname" -}}
{{- include "newrelic.common.naming.truncateToDNSWithSuffix" (dict "name" (include "nriKubernetes.naming.fullname" .) "suffix" "controlplane") -}}
{{- end -}}
{{- /* "<fullname>-agent-controlplane": name for the controlPlane agent configmap, DNS-truncated. */ -}}
{{- define "nriKubernetes.controlplane.fullname.agent" -}}
{{- include "newrelic.common.naming.truncateToDNSWithSuffix" (dict "name" (include "nriKubernetes.naming.fullname" .) "suffix" "agent-controlplane") -}}
{{- end -}}

View File

@ -0,0 +1,40 @@
{{/*
Returns the list of namespaces where secrets need to be accessed by the controlPlane integration to do mTLS Auth
*/}}
{{- define "nriKubernetes.controlPlane.roleBindingNamespaces" -}}
{{ $namespaceList := list }}
{{- range $components := .Values.controlPlane.config }}
{{- if $components }}
{{- if kindIs "map" $components -}}
{{- if $components.staticEndpoint }}
{{- if $components.staticEndpoint.auth }}
{{- if $components.staticEndpoint.auth.mtls }}
{{- if $components.staticEndpoint.auth.mtls.secretNamespace }}
{{- $namespaceList = append $namespaceList $components.staticEndpoint.auth.mtls.secretNamespace -}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if $components.autodiscover }}
{{- range $autodiscover := $components.autodiscover }}
{{- if $autodiscover }}
{{- if $autodiscover.endpoints }}
{{- range $endpoint := $autodiscover.endpoints }}
{{- if $endpoint.auth }}
{{- if $endpoint.auth.mtls }}
{{- if $endpoint.auth.mtls.secretNamespace }}
{{- $namespaceList = append $namespaceList $endpoint.auth.mtls.secretNamespace -}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
roleBindingNamespaces:
{{- uniq $namespaceList | toYaml | nindent 2 }}
{{- end -}}

View File

@ -0,0 +1,11 @@
{{- /*
As this chart deploys what it should be three charts to maintain the transition to v3 as smooth as possible.
This means that this chart has 3 tolerations so a helper should be done per scraper.
*/ -}}
{{- define "nriKubernetes.controlPlane.tolerations" -}}
{{- if .Values.controlPlane.tolerations -}}
{{- toYaml .Values.controlPlane.tolerations -}}
{{- else if include "newrelic.common.tolerations" . -}}
{{- include "newrelic.common.tolerations" . -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,18 @@
{{- if .Values.controlPlane.enabled -}}
{{- /* A legacy string-typed customAttributes cannot be translated automatically: abort with a message that shows the map equivalent. */ -}}
{{- if .Values.customAttributes | kindIs "string" }}
{{- fail ( include "newrelic.compatibility.message.customAttributes" . ) -}}
{{- else -}}
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "newrelic.common.labels" . | nindent 4 }}
  name: {{ include "nriKubernetes.controlplane.fullname.agent" . }}
data:
  newrelic-infra.yml: |-
    # This is the configuration file for the infrastructure agent. See:
    # https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/configuration/infrastructure-agent-configuration-settings/
    {{- include "nriKubernetes.controlPlane.agentConfig" . | nindent 4 }}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,47 @@
{{- /* RBAC for the controlPlane scraper, plus a second ClusterRole for reading mTLS secrets when any component config references one. */ -}}
{{- if and (.Values.controlPlane.enabled) (.Values.rbac.create) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    {{- include "newrelic.common.labels" . | nindent 4 }}
  name: {{ include "nriKubernetes.controlplane.fullname" . }}
rules:
  # Read node-level metrics and stats through the kubelet subresources.
  - apiGroups: [""]
    resources:
      - "nodes/metrics"
      - "nodes/stats"
      - "nodes/proxy"
    verbs: ["get", "list"]
  - apiGroups: [ "" ]
    resources:
      - "pods"
      - "nodes"
    verbs: [ "get", "list", "watch" ]
  - nonResourceURLs: ["/metrics"]
    verbs: ["get", "head"]
  {{- if .Values.rbac.pspEnabled }}
  {{- /* NOTE(review): the PSP resourceName is derived from the bundle-level fullname helper rather than the controlplane one — looks like a shared PSP, confirm. */ -}}
  - apiGroups:
      - extensions
    resources:
      - podsecuritypolicies
    resourceNames:
      - privileged-{{ include "newrelic.common.naming.fullname" . }}
    verbs:
      - use
  {{- end -}}
{{- $namespaces := include "nriKubernetes.controlPlane.roleBindingNamespaces" . | fromYaml -}}
{{- if $namespaces.roleBindingNamespaces}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    {{- include "newrelic.common.labels" . | nindent 4 }}
  name: {{ include "nriKubernetes.naming.secrets" . }}
rules:
  - apiGroups: [""]
    resources:
      - "secrets"
    verbs: ["get", "list", "watch"]
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,16 @@
{{- /* Binds the controlPlane ClusterRole to the "-controlplane" service account used by the controlPlane workload. */ -}}
{{- if and (.Values.controlPlane.enabled) (.Values.rbac.create) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    {{- include "newrelic.common.labels" . | nindent 4 }}
  name: {{ include "nriKubernetes.controlplane.fullname" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "nriKubernetes.controlplane.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ include "newrelic.common.serviceAccount.name" . }}-controlplane
    namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@ -0,0 +1,202 @@
{{- /* controlPlane scraper workload: a Deployment or DaemonSet (controlPlane.kind) with two containers — the
integration scraper and an infrastructure-agent forwarder sidecar. Not rendered on Fargate. */}}
{{- if and (.Values.controlPlane.enabled) (not (include "newrelic.fargate" .)) }}
apiVersion: apps/v1
kind: {{ .Values.controlPlane.kind }}
metadata:
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "nriKubernetes.labels" . | nindent 4 }}
  name: {{ include "nriKubernetes.controlplane.fullname" . }}
  {{- $legacyAnnotation:= fromYaml (include "newrelic.compatibility.annotations" .) -}}
  {{- with include "newrelic.compatibility.valueWithFallback" (dict "legacy" $legacyAnnotation "supported" .Values.controlPlane.annotations )}}
  annotations: {{ . | nindent 4 }}
  {{- end }}
spec:
  {{- if eq .Values.controlPlane.kind "DaemonSet"}}
  {{- with .Values.updateStrategy }}
  updateStrategy: {{ toYaml . | nindent 4 }}
  {{- end }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "newrelic.common.labels.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: controlplane
  template:
    metadata:
      annotations:
        {{- /* Checksum annotations roll the pods whenever the rendered configmaps or license secret change. */}}
        checksum/nri-kubernetes: {{ include (print $.Template.BasePath "/controlplane/scraper-configmap.yaml") . | sha256sum }}
        checksum/agent-config: {{ include (print $.Template.BasePath "/controlplane/agent-configmap.yaml") . | sha256sum }}
        {{- if include "newrelic.common.license.secret" . }}{{- /* If there is a secret to template */}}
        checksum/license-secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
        {{- end }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        {{- include "nriKubernetes.labels.podLabels" . | nindent 8 }}
        app.kubernetes.io/component: controlplane
    spec:
      {{- with include "newrelic.common.images.renderPullSecrets" ( dict "pullSecrets" (list .Values.images.pullSecrets) "context" .) }}
      imagePullSecrets:
        {{- . | nindent 8 }}
      {{- end }}
      {{- with include "newrelic.common.dnsConfig" . }}
      dnsConfig:
        {{- . | nindent 8 }}
      {{- end }}
      hostNetwork: {{ include "nriKubernetes.controlPlane.hostNetwork.value" . }}
      {{- if include "nriKubernetes.controlPlane.hostNetwork" . }}
      dnsPolicy: ClusterFirstWithHostNet
      {{- end }}
      {{- with include "newrelic.common.priorityClassName" . }}
      priorityClassName: {{ . }}
      {{- end }}
      {{- with include "newrelic.common.securityContext.pod" . }}
      securityContext:
        {{- . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "newrelic.common.serviceAccount.name" . }}-controlplane
      {{- if .Values.controlPlane.initContainers }}
      initContainers: {{- tpl (.Values.controlPlane.initContainers | toYaml) . | nindent 8 }}
      {{- end }}
      containers:
        {{- /* Scraper container: collects control-plane data and pushes it to the forwarder's HTTP sink port. */}}
        - name: controlplane
          image: {{ include "newrelic.common.images.image" ( dict "imageRoot" .Values.images.integration "context" .) }}
          imagePullPolicy: {{ .Values.images.integration.pullPolicy }}
          {{- with include "nriKubernetes.securityContext.container" . | fromYaml }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          env:
            - name: "NRI_KUBERNETES_SINK_HTTP_PORT"
              value: {{ get (fromYaml (include "nriKubernetes.controlPlane.agentConfig" .)) "http_server_port" | quote }}
            - name: "NRI_KUBERNETES_CLUSTERNAME"
              value: {{ include "newrelic.common.cluster" . }}
            - name: "NRI_KUBERNETES_VERBOSE"
              value: {{ include "newrelic.common.verboseLog.valueAsBoolean" . | quote }}
            - name: "NRI_KUBERNETES_NODENAME"
              valueFrom:
                fieldRef:
                  apiVersion: "v1"
                  fieldPath: "spec.nodeName"
            - name: "NRI_KUBERNETES_NODEIP"
              valueFrom:
                fieldRef:
                  apiVersion: "v1"
                  fieldPath: "status.hostIP"
            {{- with .Values.controlPlane.extraEnv }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- with .Values.controlPlane.extraEnvFrom }}
          envFrom: {{ toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: nri-kubernetes-config
              mountPath: /etc/newrelic-infra/nri-kubernetes.yml
              subPath: nri-kubernetes.yml
            {{- with .Values.controlPlane.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- with .Values.controlPlane.resources }}
          resources: {{ toYaml . | nindent 12 }}
          {{- end }}
        {{- /* Forwarder sidecar: the infrastructure agent that ships the scraper's data to New Relic. */}}
        - name: forwarder
          image: {{ include "newrelic.common.images.image" ( dict "imageRoot" .Values.images.forwarder "context" .) }}
          imagePullPolicy: {{ .Values.images.forwarder.pullPolicy }}
          {{- with include "nriKubernetes.securityContext.container" . | fromYaml }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          ports:
            - containerPort: {{ get (fromYaml (include "nriKubernetes.controlPlane.agentConfig" .)) "http_server_port" }}
          env:
            - name: "NRIA_LICENSE_KEY"
              valueFrom:
                secretKeyRef:
                  name: {{ include "newrelic.common.license.secretName" . }}
                  key: {{ include "newrelic.common.license.secretKeyName" . }}
            - name: "NRIA_OVERRIDE_HOSTNAME_SHORT"
              valueFrom:
                fieldRef:
                  apiVersion: "v1"
                  fieldPath: "spec.nodeName"
            - name: "K8S_NODE_NAME"
              valueFrom:
                fieldRef:
                  apiVersion: "v1"
                  fieldPath: "spec.nodeName"
            {{- if .Values.useNodeNameAsDisplayName }}
            - name: "NRIA_DISPLAY_NAME"
            {{- if .Values.prefixDisplayNameWithCluster }}
              value: "{{ include "newrelic.common.cluster" . }}:$(K8S_NODE_NAME)"
            {{- else }}
              valueFrom:
                fieldRef:
                  apiVersion: "v1"
                  fieldPath: "spec.nodeName"
            {{- end }}
            {{- end }}
            {{- with .Values.controlPlane.extraEnv }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- with .Values.controlPlane.extraEnvFrom }}
          envFrom:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            {{- /* tmpfs mounts keep the agent writable while the root filesystem stays read-only. */}}
            - mountPath: /var/db/newrelic-infra/data
              name: forwarder-tmpfs-data
            - mountPath: /var/db/newrelic-infra/user_data
              name: forwarder-tmpfs-user-data
            - mountPath: /tmp
              name: forwarder-tmpfs-tmp
            - name: config
              mountPath: /etc/newrelic-infra.yml
              subPath: newrelic-infra.yml
            {{- with .Values.controlPlane.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- with .Values.controlPlane.resources }}
          resources: {{ toYaml . | nindent 12 }}
          {{- end }}
      volumes:
        - name: nri-kubernetes-config
          configMap:
            name: {{ include "nriKubernetes.controlplane.fullname" . }}
            items:
              - key: nri-kubernetes.yml
                path: nri-kubernetes.yml
        - name: forwarder-tmpfs-data
          emptyDir: {}
        - name: forwarder-tmpfs-user-data
          emptyDir: {}
        - name: forwarder-tmpfs-tmp
          emptyDir: {}
        - name: config
          configMap:
            name: {{ include "nriKubernetes.controlplane.fullname.agent" . }}
            items:
              - key: newrelic-infra.yml
                path: newrelic-infra.yml
        {{- with .Values.controlPlane.extraVolumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with include "nriKubernetes.controlPlane.affinity" . }}
      affinity:
        {{- . | nindent 8 }}
      {{- end }}
      {{- with include "nriKubernetes.controlPlane.tolerations" . }}
      tolerations:
        {{- . | nindent 8 }}
      {{- end }}
      {{- with .Values.controlPlane.nodeSelector | default (fromYaml (include "newrelic.common.nodeSelector" .)) }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end -}}
{{- end }}

Some files were not shown because too many files have changed in this diff Show More