Added chart versions:

  kubemq/kubemq-cluster:
    - 2.4.0
  linkerd/linkerd-control-plane:
    - 2024.9.1
  linkerd/linkerd-crds:
    - 2024.9.1
  speedscale/speedscale-operator:
    - 2.2.372
pull/1059/head
github-actions[bot] 2024-09-07 00:52:44 +00:00
parent a22223baab
commit 2f77170aba
121 changed files with 19391 additions and 3 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.


@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@ -0,0 +1,9 @@
dependencies:
- name: kubemq-crds
  repository: https://kubemq-io.github.io/charts
  version: 2.3.7
- name: kubemq-controller
  repository: https://kubemq-io.github.io/charts
  version: 1.9.3
digest: sha256:c9b644d18249502f1f7ceb749b408da3844e9a5005da89ed03079cbed68de63b
generated: "2023-03-18T13:35:49.5815949+02:00"


@ -0,0 +1,17 @@
annotations:
  catalog.cattle.io/certified: partner
  catalog.cattle.io/display-name: KubeMQ Cluster
  catalog.cattle.io/kube-version: '>=1.21-0'
  catalog.cattle.io/release-name: kubemq-cluster
apiVersion: v2
appVersion: 2.9.3
description: A Helm chart for KubeMQ Cluster, Kubernetes Message Queue Broker
icon: file://assets/icons/kubemq-cluster.svg
kubeVersion: '>=1.21-0'
maintainers:
- email: info@kubemq.io
  name: KubeMQ
  url: https://kubemq.io
name: kubemq-cluster
type: application
version: 2.4.0


@ -0,0 +1,26 @@
# kubemq-cluster
`kubemq-cluster` is the Helm chart that installs the KubeMQ Cluster.
## Installing
For example, to install into a dedicated `kubemq` namespace:
```console
$ helm repo add kubemq-charts https://kubemq-io.github.io/charts
$ helm install --create-namespace -n kubemq kubemq-cluster kubemq-charts/kubemq-cluster
```
## Upgrading the charts
Please refer to the release notes of each version of the helm charts.
These can be found [here](https://github.com/kubemq/helm-charts/releases).
## Uninstalling the charts
To uninstall/delete kubemq-cluster use the following command:
```console
$ helm uninstall -n kubemq kubemq-cluster
```
The command removes all the Kubernetes components associated with the chart.
If you want to keep the release history, use the `--keep-history` flag.
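For instance, assuming the release name and namespace used above, a history-preserving uninstall is a one-flag variation of the same command (a sketch, not chart-specific behaviour):
```console
$ helm uninstall -n kubemq kubemq-cluster --keep-history
```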


@ -0,0 +1,43 @@
# KubeMQ Charts
KubeMQ is a cloud-native, enterprise-grade message queue broker for distributed services architectures.
KubeMQ is delivered as a small, lightweight Docker container, designed for any type of workload and architecture running in Kubernetes or any other container orchestration system that supports Docker.
## HELM
KubeMQ Helm charts require Helm v3. Please download/upgrade from [https://github.com/helm/helm](https://github.com/helm/helm).
## Add KubeMQ Helm Repository
```
$ helm repo add kubemq-charts https://kubemq-io.github.io/charts
```
Verify that the KubeMQ Helm repository is configured correctly, e.g. with `helm repo list`.
## Update KubeMQ Helm Repository
```
$ helm repo update
```
## Install KubeMQ Cluster Chart
``` console
$ helm install kubemq-crds kubemq-charts/kubemq-crds
$ helm install --wait --create-namespace -n kubemq kubemq-controller kubemq-charts/kubemq-controller
$ helm install --wait -n kubemq kubemq-cluster --set key={your-license-key} kubemq-charts/kubemq-cluster
```
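Values documented in the chart's `values.yaml` (shown later in this changeset), such as `replicas` and `volume.size`, can be overridden at install time. A sketch, with the license key left as a placeholder:
```console
$ helm install --wait -n kubemq kubemq-cluster \
    --set key={your-license-key} \
    --set replicas=5 \
    --set volume.size=20Gi \
    kubemq-charts/kubemq-cluster
```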
## Uninstall KubeMQ Cluster Chart
To uninstall/delete the KubeMQ release:
``` console
$ helm uninstall -n kubemq kubemq-cluster
$ helm uninstall -n kubemq kubemq-controller
$ helm uninstall kubemq-crds
```
## Documentation
Please visit [https://docs.kubemq.io](https://docs.kubemq.io) for more information about KubeMQ.


@ -0,0 +1,37 @@
questions:
- variable: key
  default: ""
  required: true
  label: KubeMQ Key
  type: string
  description: "KubeMQ Key - Register at https://kubemq.io"
  group: "General Settings"
- variable: replicas
  default: 3
  required: true
  label: replicas
  type: int
  description: "Number of replicas of KubeMQ Nodes"
  group: "General Settings"
- variable: image.image
  default: "kubemq/kubemq:latest"
  required: false
  label: Image Repository
  type: string
  description: "KubeMQ Image Repository"
  group: "General Settings"
- variable: volume.size
  default: ""
  required: false
  label: Persistent Volume Size
  type: string
  description: "You can set this to a specific size, e.g. 10Gi, or leave it blank to not use persistent storage"
  group: "General Settings"
- variable: volume.storageClass
  default: ""
  required: false
  label: Persistent Volume Storage Class
  type: string
  description: "You can set this to a specific storage class, e.g. local-path, or leave it blank to use the default storage class"
  group: "General Settings"


@ -0,0 +1,50 @@
{{/* vim: set filetype=mustache: */}}
{{/*{{- define "kubemq.name" -}}*/}}
{{/*{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}*/}}
{{/*{{- end -}}*/}}
{{- define "kubemq.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubemq.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generate chart secret name
*/}}
{{- define "kubemq.secretName" -}}
{{ default (include "kubemq.fullname" .) .Values.existingSecret }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "mychart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "mychart.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- define "kubemq.crbName" -}}
{{- printf "kubemq-operator-%s-crb" .Release.Namespace -}}
{{- end -}}
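As a usage sketch of the naming helpers above (both release names are hypothetical), `kubemq.fullname` joins the release and chart names unless the release name already contains the chart name:
```console
$ helm install my-kubemq kubemq-charts/kubemq-cluster
# "kubemq.fullname" resolves to "my-kubemq-kubemq-cluster"
$ helm install kubemq-cluster kubemq-charts/kubemq-cluster
# release name already contains the chart name, so "kubemq.fullname" resolves to "kubemq-cluster"
```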


@ -0,0 +1,21 @@
apiVersion: core.k8s.kubemq.io/v1beta1
kind: KubemqCluster
metadata:
  name: {{ include "kubemq.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ include "kubemq.fullname" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
{{- if or .Values.key .Values.license }}
{{- if .Values.key }}
  key: {{ .Values.key }}
{{- else if .Values.license }}
  license: {{ .Values.license }}
{{- end }}
{{- else }}
{{- fail "Either .Values.key or .Values.license must be provided" }}
{{- end }}
{{ toYaml .Values | indent 2 }}
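For orientation, with the chart defaults and a real key supplied, the template above renders a `KubemqCluster` resource roughly like the sketch below; most of `spec` is simply the chart values inlined by `toYaml .Values`, abbreviated here:
```yaml
apiVersion: core.k8s.kubemq.io/v1beta1
kind: KubemqCluster
metadata:
  name: kubemq-cluster
  namespace: kubemq
  labels:
    app: kubemq-cluster
    chart: kubemq-cluster-2.4.0
spec:
  key: <your-license-key>
  replicas: 3
  # ...the remaining chart values follow here via `toYaml .Values | indent 2`
```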


@ -0,0 +1,12 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubemq-cluster-rb
subjects:
- kind: ServiceAccount
  name: kubemq-cluster
  namespace: {{ .Release.Namespace }}
roleRef:
  kind: Role
  name: kubemq-cluster-role
  apiGroup: rbac.authorization.k8s.io


@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubemq-cluster
  namespace: {{ .Release.Namespace }}


@ -0,0 +1 @@
key:


@ -0,0 +1,117 @@
# Number of replicas of KubeMQ Nodes - https://docs.kubemq.io/configuration/cluster/default-template
replicas: 3
# KubeMQ license key
key: kubemq license key
# KubeMQ license data - https://docs.kubemq.io/configuration/cluster/set-license
license: kubemq license data
# KubeMQ Volume Configuration - https://docs.kubemq.io/configuration/cluster/set-persistence-volume
volume:
  size: 10Gi
  storageClass: default
# KubeMQ docker image - https://docs.kubemq.io/configuration/cluster/set-cluster-image
image:
  image: kubemq/kubemq:latest
  pullPolicy: Always
# KubeMQ Api interface - https://docs.kubemq.io/configuration/cluster/set-api-interface
api:
  disabled: false
  expose: NodePort
  nodePort: 32080
  port: 8080
# KubeMQ gRPC interface - https://docs.kubemq.io/configuration/cluster/set-grpc-interface
grpc:
  disabled: false
  expose: NodePort
  nodePort: 32000
  port: 50000
  bodyLimit: 10000000
# KubeMQ REST interface - https://docs.kubemq.io/configuration/cluster/set-rest-interface
rest:
  bodyLimit: 1000000
  disabled: true
  expose: NodePort
  nodePort: 32090
  port: 9090
# KubeMQ Authentication Configuration - https://docs.kubemq.io/configuration/cluster/set-authentication
authentication:
  key: jwt
  type: jwt token type
# KubeMQ Authorization Configuration - https://docs.kubemq.io/configuration/cluster/set-authorization
authorization:
  autoReload: 300000
  policy: policy type
  url: remote url
# KubeMQ Health Configuration - https://docs.kubemq.io/configuration/cluster/set-health-probe
health:
  failureThreshold: 3
  initialDelaySeconds: 3
  periodSeconds: 4
  successThreshold: 1
  timeoutSeconds: 10
# KubeMQ Logging Configuration - https://docs.kubemq.io/configuration/cluster/set-logs
log:
  file: path to log file
  level: 1
# KubeMQ NodeSelectors Configuration - https://docs.kubemq.io/configuration/cluster/set-node-selectors
nodeSelectors:
  keys:
    key: value
# KubeMQ Queue Configuration - https://docs.kubemq.io/configuration/cluster/set-queues-settings
queue:
  defaultVisibilitySeconds: 0
  defaultWaitTimeoutSeconds: 0
  maxDelaySeconds: 0
  maxExpirationSeconds: 0
  maxReQueues: 0
  maxReceiveMessagesRequest: 0
  maxVisibilitySeconds: 0
  maxWaitTimeoutSeconds: 0
# KubeMQ Resources Configuration - https://docs.kubemq.io/configuration/cluster/set-resources-limits
resources:
  limitsCpu: "3"
  limitsEphemeralStorage: 100Gi
  limitsMemory: 2Gi
  requestsCpu: "3"
  requestsEphemeralStorage: 200Gi
  requestsMemory: 4Gi
# KubeMQ Routing Configuration - https://docs.kubemq.io/configuration/cluster/set-routing
routing:
  autoReload: 300000
  data: routing data
  url: routing url
# KubeMQ Cluster Configuration - when standalone is true, KubeMQ will run as a single node
standalone: false
# KubeMQ Store Configuration - https://docs.kubemq.io/configuration/cluster/set-store-settings
store:
  clean: true
  maxChannelSize: 0
  maxChannels: 0
  maxMessages: 0
  maxSubscribers: 0
  messagesRetentionMinutes: 0
  path: path to store
  purgeInactiveMinutes: 0
# KubeMQ TLS Configuration - https://docs.kubemq.io/configuration/cluster/set-tls
tls:
  ca: ca data
  cert: cert data
  key: key data
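As a minimal example, a single-node development install could override just a handful of these defaults (a sketch; every key appears in the defaults above, and the concrete values are only illustrative):
```yaml
standalone: true
replicas: 1
image:
  image: kubemq/kubemq:latest
  pullPolicy: IfNotPresent
volume:
  size: ""
```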


@ -2,7 +2,6 @@ annotations:
catalog.cattle.io/auto-install: linkerd-crds
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Linkerd Control Plane
catalog.cattle.io/featured: "5"
catalog.cattle.io/kube-version: '>=1.22.0-0'
catalog.cattle.io/release-name: linkerd-control-plane
apiVersion: v2


@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
OWNERS
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


@ -0,0 +1,6 @@
dependencies:
- name: partials
  repository: file://../partials
  version: 0.1.0
digest: sha256:8e42f9c9d4a2dc883f17f94d6044c97518ced19ad0922f47b8760e47135369ba
generated: "2021-12-06T11:42:50.784240359-05:00"


@ -0,0 +1,29 @@
annotations:
  catalog.cattle.io/auto-install: linkerd-crds
  catalog.cattle.io/certified: partner
  catalog.cattle.io/display-name: Linkerd Control Plane
  catalog.cattle.io/featured: "5"
  catalog.cattle.io/kube-version: '>=1.22.0-0'
  catalog.cattle.io/release-name: linkerd-control-plane
apiVersion: v2
appVersion: edge-24.9.1
dependencies:
- name: partials
  repository: file://./charts/partials
  version: 0.1.0
description: 'Linkerd gives you observability, reliability, and security for your
  microservices — with no code change required. '
home: https://linkerd.io
icon: file://assets/icons/linkerd-control-plane.png
keywords:
- service-mesh
kubeVersion: '>=1.22.0-0'
maintainers:
- email: cncf-linkerd-dev@lists.cncf.io
  name: Linkerd authors
  url: https://linkerd.io/
name: linkerd-control-plane
sources:
- https://github.com/linkerd/linkerd2/
type: application
version: 2024.9.1


@ -0,0 +1,321 @@
# linkerd-control-plane
Linkerd gives you observability, reliability, and security
for your microservices — with no code change required.
![Version: 2024.9.1](https://img.shields.io/badge/Version-2024.9.1-informational?style=flat-square)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![AppVersion: edge-XX.X.X](https://img.shields.io/badge/AppVersion-edge--XX.X.X-informational?style=flat-square)
**Homepage:** <https://linkerd.io>
## Quickstart and documentation
You can run Linkerd on any Kubernetes cluster in a matter of seconds. See the
[Linkerd Getting Started Guide][getting-started] for how.
For more comprehensive documentation, start with the [Linkerd
docs][linkerd-docs].
## Prerequisite: linkerd-crds chart
Before installing this chart, please install the `linkerd-crds` chart, which
creates all the CRDs that the components from the current chart require.
## Prerequisite: identity certificates
The identity component of Linkerd requires setting up a trust anchor
certificate, and an issuer certificate with its key. These need to be provided
to Helm by the user (unlike when using the `linkerd install` CLI which can
generate these automatically). You can provide your own, or follow [these
instructions](https://linkerd.io/2/tasks/generate-certificates/) to generate new
ones.
Alternatively, both trust anchor and identity issuer certificates may be
derived from in-cluster resources. Existing CA (trust anchor) certificates
**must** live in a `ConfigMap` resource named `linkerd-identity-trust-roots`.
Issuer certificates **must** live in a `Secret` named
`linkerd-identity-issuer`. Both resources should exist in the control-plane's
install namespace. In order to use an existing CA, Linkerd needs to be
installed with `identity.externalCA=true`. To use an existing issuer
certificate, Linkerd should be installed with
`identity.issuer.scheme=kubernetes.io/tls`.
A more comprehensive description is in the [automatic certificate rotation
guide](https://linkerd.io/2.12/tasks/automatically-rotating-control-plane-tls-credentials/#a-note-on-third-party-cert-management-solutions).
Note that the provided certificates must be ECDSA certificates.
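For example, to rely on the in-cluster trust anchor and issuer described above instead of passing certificate files, the install could look like this sketch (the value names are those given in the paragraph above; release name and namespace follow the rest of this README):
```bash
helm install linkerd-control-plane -n linkerd \
  --set identity.externalCA=true \
  --set identity.issuer.scheme=kubernetes.io/tls \
  linkerd/linkerd-control-plane
```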
## Adding Linkerd's Helm repository
Included here for completeness' sake; this repository should already have been added
when `linkerd-base` was installed.
```bash
# To add the repo for Linkerd edge releases:
helm repo add linkerd https://helm.linkerd.io/edge
```
## Installing the chart
You must provide the certificates and keys described in the preceding section,
and the same expiration date you used to generate the Issuer certificate.
```bash
helm install linkerd-control-plane -n linkerd \
--set-file identityTrustAnchorsPEM=ca.crt \
--set-file identity.issuer.tls.crtPEM=issuer.crt \
--set-file identity.issuer.tls.keyPEM=issuer.key \
linkerd/linkerd-control-plane
```
Note that this chart must be installed in the same namespace in which you installed
the `linkerd-base` chart.
## Setting High-Availability
Besides the default `values.yaml` file, the chart provides a `values-ha.yaml`
file that overrides some default values so as to set things up for a
high-availability scenario, analogous to the `--ha` option in `linkerd install`.
Values such as a higher number of replicas, higher memory/CPU limits, and
affinities are specified in that file.
You can get hold of `values-ha.yaml` by fetching the chart files:
```bash
helm fetch --untar linkerd/linkerd-control-plane
```
Then use the `-f` flag to provide the override file, for example:
```bash
helm install linkerd-control-plane -n linkerd \
--set-file identityTrustAnchorsPEM=ca.crt \
--set-file identity.issuer.tls.crtPEM=issuer.crt \
--set-file identity.issuer.tls.keyPEM=issuer.key \
-f linkerd2/values-ha.yaml
linkerd/linkerd-control-plane
```
## Get involved
* Check out Linkerd's source code at [GitHub][linkerd2].
* Join Linkerd's [user mailing list][linkerd-users], [developer mailing
list][linkerd-dev], and [announcements mailing list][linkerd-announce].
* Follow [@linkerd][twitter] on Twitter.
* Join the [Linkerd Slack][slack].
[getting-started]: https://linkerd.io/2/getting-started/
[linkerd2]: https://github.com/linkerd/linkerd2
[linkerd-announce]: https://lists.cncf.io/g/cncf-linkerd-announce
[linkerd-dev]: https://lists.cncf.io/g/cncf-linkerd-dev
[linkerd-docs]: https://linkerd.io/2/overview/
[linkerd-users]: https://lists.cncf.io/g/cncf-linkerd-users
[slack]: http://slack.linkerd.io
[twitter]: https://twitter.com/linkerd
## Extensions for Linkerd
The current chart installs the core Linkerd components, which grant you
reliability and security features. Other functionality is available through
extensions. Check the corresponding docs for each one of the following
extensions:
* Observability:
[Linkerd-viz](https://github.com/linkerd/linkerd2/blob/main/viz/charts/linkerd-viz/README.md)
* Multicluster:
[Linkerd-multicluster](https://github.com/linkerd/linkerd2/blob/main/multicluster/charts/linkerd-multicluster/README.md)
* Tracing:
[Linkerd-jaeger](https://github.com/linkerd/linkerd2/blob/main/jaeger/charts/linkerd-jaeger/README.md)
## Requirements
Kubernetes: `>=1.22.0-0`
| Repository | Name | Version |
|------------|------|---------|
| file://../partials | partials | 0.1.0 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| clusterDomain | string | `"cluster.local"` | Kubernetes DNS Domain name to use |
| clusterNetworks | string | `"10.0.0.0/8,100.64.0.0/10,172.16.0.0/12,192.168.0.0/16,fd00::/8"` | The cluster networks for which service discovery is performed. This should include the pod and service networks, but need not include the node network. By default, all IPv4 private networks and all accepted IPv6 ULAs are specified so that resolution works in typical Kubernetes environments. |
| cniEnabled | bool | `false` | enabling this omits the NET_ADMIN capability in the PSP and the proxy-init container when injecting the proxy; requires the linkerd-cni plugin to already be installed |
| commonLabels | object | `{}` | Labels to apply to all resources |
| controlPlaneTracing | bool | `false` | enables control plane tracing |
| controlPlaneTracingNamespace | string | `"linkerd-jaeger"` | namespace to send control plane traces to |
| controller.podDisruptionBudget | object | `{"maxUnavailable":1}` | sets pod disruption budget parameter for all deployments |
| controller.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number of pods that can be unavailable during disruption |
| controllerGID | int | `-1` | Optional customisation of the group ID for the control plane components (the group ID will be omitted if lower than 0) |
| controllerImage | string | `"cr.l5d.io/linkerd/controller"` | Docker image for the destination and identity components |
| controllerImageVersion | string | `""` | Optionally allow a specific container image Tag (or SHA) to be specified for the controllerImage. |
| controllerLogFormat | string | `"plain"` | Log format for the control plane components |
| controllerLogLevel | string | `"info"` | Log level for the control plane components |
| controllerReplicas | int | `1` | Number of replicas for each control plane pod |
| controllerUID | int | `2103` | User ID for the control plane components |
| debugContainer.image.name | string | `"cr.l5d.io/linkerd/debug"` | Docker image for the debug container |
| debugContainer.image.pullPolicy | string | imagePullPolicy | Pull policy for the debug container image |
| debugContainer.image.version | string | linkerdVersion | Tag for the debug container image |
| deploymentStrategy | object | `{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"}}` | default kubernetes deployment strategy |
| destinationController.livenessProbe.timeoutSeconds | int | `1` | |
| destinationController.meshedHttp2ClientProtobuf.keep_alive.interval.seconds | int | `10` | |
| destinationController.meshedHttp2ClientProtobuf.keep_alive.timeout.seconds | int | `3` | |
| destinationController.meshedHttp2ClientProtobuf.keep_alive.while_idle | bool | `true` | |
| destinationController.readinessProbe.timeoutSeconds | int | `1` | |
| disableHeartBeat | bool | `false` | Set to true to not start the heartbeat cronjob |
| disableIPv6 | bool | `true` | disables routing IPv6 traffic in addition to IPv4 traffic through the proxy (IPv6 routing only available as of proxy-init v2.3.0 and linkerd-cni v1.4.0) |
| enableEndpointSlices | bool | `true` | enables the use of EndpointSlice informers for the destination service; enableEndpointSlices should be set to true only if EndpointSlice K8s feature gate is on |
| enableH2Upgrade | bool | `true` | Allow proxies to perform transparent HTTP/2 upgrading |
| enablePSP | bool | `false` | Add a PSP resource and bind it to the control plane ServiceAccounts. Note PSP has been deprecated since k8s v1.21 |
| enablePodAntiAffinity | bool | `false` | enables pod anti affinity creation on deployments for high availability |
| enablePodDisruptionBudget | bool | `false` | enables the creation of pod disruption budgets for control plane components |
| enablePprof | bool | `false` | enables the use of pprof endpoints on control plane component's admin servers |
| identity.externalCA | bool | `false` | If the linkerd-identity-trust-roots ConfigMap has already been created |
| identity.issuer.clockSkewAllowance | string | `"20s"` | Amount of time to allow for clock skew within a Linkerd cluster |
| identity.issuer.issuanceLifetime | string | `"24h0m0s"` | Amount of time for which the Identity issuer should certify identity |
| identity.issuer.scheme | string | `"linkerd.io/tls"` | |
| identity.issuer.tls | object | `{"crtPEM":"","keyPEM":""}` | Which scheme is used for the identity issuer secret format |
| identity.issuer.tls.crtPEM | string | `""` | Issuer certificate (ECDSA). It must be provided during install. |
| identity.issuer.tls.keyPEM | string | `""` | Key for the issuer certificate (ECDSA). It must be provided during install |
| identity.kubeAPI.clientBurst | int | `200` | Burst value over clientQPS |
| identity.kubeAPI.clientQPS | int | `100` | Maximum QPS sent to the kube-apiserver before throttling. See [token bucket rate limiter implementation](https://github.com/kubernetes/client-go/blob/v12.0.0/util/flowcontrol/throttle.go) |
| identity.livenessProbe.timeoutSeconds | int | `1` | |
| identity.readinessProbe.timeoutSeconds | int | `1` | |
| identity.serviceAccountTokenProjection | bool | `true` | Use [Service Account token Volume projection](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection) for pod validation instead of the default token |
| identityTrustAnchorsPEM | string | `""` | Trust root certificate (ECDSA). It must be provided during install. |
| identityTrustDomain | string | clusterDomain | Trust domain used for identity |
| imagePullPolicy | string | `"IfNotPresent"` | Docker image pull policy |
| imagePullSecrets | list | `[]` | For Private docker registries, authentication is needed. Registry secrets are applied to the respective service accounts |
| kubeAPI.clientBurst | int | `200` | Burst value over clientQPS |
| kubeAPI.clientQPS | int | `100` | Maximum QPS sent to the kube-apiserver before throttling. See [token bucket rate limiter implementation](https://github.com/kubernetes/client-go/blob/v12.0.0/util/flowcontrol/throttle.go) |
| linkerdVersion | string | `"linkerdVersionValue"` | control plane version. See Proxy section for proxy version |
| networkValidator.connectAddr | string | `""` | Address to which the network-validator will attempt to connect. This should be an IP that the cluster is expected to be able to reach but a port it should not, e.g., a public IP for public clusters and a private IP for air-gapped clusters with a port like 20001. If empty, defaults to 1.1.1.1:20001 and [fd00::1]:20001 for IPv4 and IPv6 respectively. |
| networkValidator.enableSecurityContext | bool | `true` | Include a securityContext in the network-validator pod spec |
| networkValidator.listenAddr | string | `""` | Address to which network-validator listens to requests from itself. If empty, defaults to 0.0.0.0:4140 and [::]:4140 for IPv4 and IPv6 respectively. |
| networkValidator.logFormat | string | plain | Log format (`plain` or `json`) for network-validator |
| networkValidator.logLevel | string | debug | Log level for the network-validator |
| networkValidator.timeout | string | `"10s"` | Timeout before network-validator fails to validate the pod's network connectivity |
| nodeSelector | object | `{"kubernetes.io/os":"linux"}` | NodeSelector section, See the [K8S documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) for more information |
| podAnnotations | object | `{}` | Additional annotations to add to all pods |
| podLabels | object | `{}` | Additional labels to add to all pods |
| podMonitor.controller.enabled | bool | `true` | Enables the creation of PodMonitor for the control-plane |
| podMonitor.controller.namespaceSelector | string | `"matchNames:\n - {{ .Release.Namespace }}\n - linkerd-viz\n - linkerd-jaeger\n"` | Selector to select which namespaces the Endpoints objects are discovered from |
| podMonitor.enabled | bool | `false` | Enables the creation of Prometheus Operator [PodMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor) |
| podMonitor.labels | object | `{}` | Labels to apply to all pod Monitors |
| podMonitor.proxy.enabled | bool | `true` | Enables the creation of PodMonitor for the data-plane |
| podMonitor.scrapeInterval | string | `"10s"` | Interval at which metrics should be scraped |
| podMonitor.scrapeTimeout | string | `"10s"` | Timeout after which the scrape is ended |
| podMonitor.serviceMirror.enabled | bool | `true` | Enables the creation of PodMonitor for the Service Mirror component |
| policyController.image.name | string | `"cr.l5d.io/linkerd/policy-controller"` | Docker image for the policy controller |
| policyController.image.pullPolicy | string | imagePullPolicy | Pull policy for the policy controller container image |
| policyController.image.version | string | linkerdVersion | Tag for the policy controller container image |
| policyController.livenessProbe.timeoutSeconds | int | `1` | |
| policyController.logLevel | string | `"info"` | Log level for the policy controller |
| policyController.probeNetworks | list | `["0.0.0.0/0","::/0"]` | The networks from which probes are performed. By default, all networks are allowed so that all probes are authorized. |
| policyController.readinessProbe.timeoutSeconds | int | `1` | |
| policyController.resources | object | `{"cpu":{"limit":"","request":""},"ephemeral-storage":{"limit":"","request":""},"memory":{"limit":"","request":""}}` | policy controller resource requests & limits |
| policyController.resources.cpu.limit | string | `""` | Maximum amount of CPU units that the policy controller can use |
| policyController.resources.cpu.request | string | `""` | Amount of CPU units that the policy controller requests |
| policyController.resources.ephemeral-storage.limit | string | `""` | Maximum amount of ephemeral storage that the policy controller can use |
| policyController.resources.ephemeral-storage.request | string | `""` | Amount of ephemeral storage that the policy controller requests |
| policyController.resources.memory.limit | string | `""` | Maximum amount of memory that the policy controller can use |
| policyController.resources.memory.request | string | `""` | Maximum amount of memory that the policy controller requests |
| policyValidator.caBundle | string | `""` | Bundle of CA certificates for proxy injector. If not provided nor injected with cert-manager, then Helm will use the certificate generated for `policyValidator.crtPEM`. If `policyValidator.externalSecret` is set to true, this value, injectCaFrom, or injectCaFromSecret must be set, as no certificate will be generated. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector) for more information. |
| policyValidator.crtPEM | string | `""` | Certificate for the policy validator. If not provided and not using an external secret then Helm will generate one. |
| policyValidator.externalSecret | bool | `false` | Do not create a secret resource for the policyValidator webhook. If this is set to `true`, the value `policyValidator.caBundle` must be set or the ca bundle must injected with cert-manager ca injector using `policyValidator.injectCaFrom` or `policyValidator.injectCaFromSecret` (see below). |
| policyValidator.injectCaFrom | string | `""` | Inject the CA bundle from a cert-manager Certificate. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-certificate-resource) for more information. |
| policyValidator.injectCaFromSecret | string | `""` | Inject the CA bundle from a Secret. If set, the `cert-manager.io/inject-ca-from-secret` annotation will be added to the webhook. The Secret must have the CA Bundle stored in the `ca.crt` key and have the `cert-manager.io/allow-direct-injection` annotation set to `true`. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-secret-resource) for more information. |
| policyValidator.keyPEM | string | `""` | Certificate key for the policy validator. If not provided and not using an external secret then Helm will generate one. |
| policyValidator.namespaceSelector | object | `{"matchExpressions":[{"key":"config.linkerd.io/admission-webhooks","operator":"NotIn","values":["disabled"]}]}` | Namespace selector used by admission webhook |
| priorityClassName | string | `""` | Kubernetes priorityClassName for the Linkerd Pods |
| profileValidator.caBundle | string | `""` | Bundle of CA certificates for proxy injector. If not provided nor injected with cert-manager, then Helm will use the certificate generated for `profileValidator.crtPEM`. If `profileValidator.externalSecret` is set to true, this value, injectCaFrom, or injectCaFromSecret must be set, as no certificate will be generated. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector) for more information. |
| profileValidator.crtPEM | string | `""` | Certificate for the service profile validator. If not provided and not using an external secret then Helm will generate one. |
| profileValidator.externalSecret | bool | `false` | Do not create a secret resource for the profileValidator webhook. If this is set to `true`, the value `proxyInjector.caBundle` must be set or the ca bundle must injected with cert-manager ca injector using `proxyInjector.injectCaFrom` or `proxyInjector.injectCaFromSecret` (see below). |
| profileValidator.injectCaFrom | string | `""` | Inject the CA bundle from a cert-manager Certificate. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-certificate-resource) for more information. |
| profileValidator.injectCaFromSecret | string | `""` | Inject the CA bundle from a Secret. If set, the `cert-manager.io/inject-ca-from-secret` annotation will be added to the webhook. The Secret must have the CA Bundle stored in the `ca.crt` key and have the `cert-manager.io/allow-direct-injection` annotation set to `true`. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-secret-resource) for more information. |
| profileValidator.keyPEM | string | `""` | Certificate key for the service profile validator. If not provided and not using an external secret then Helm will generate one. |
| profileValidator.namespaceSelector | object | `{"matchExpressions":[{"key":"config.linkerd.io/admission-webhooks","operator":"NotIn","values":["disabled"]}]}` | Namespace selector used by admission webhook |
| prometheusUrl | string | `""` | url of external prometheus instance (used for the heartbeat) |
| proxy.await | bool | `true` | If set, the application container will not start until the proxy is ready |
| proxy.control.streams.idleTimeout | string | `"5m"` | The timeout between consecutive updates from the control plane. |
| proxy.control.streams.initialTimeout | string | `"3s"` | The timeout for the first update from the control plane. |
| proxy.control.streams.lifetime | string | `"1h"` | The maximum duration for a response stream (i.e. before it will be reinitialized). |
| proxy.cores | int | `0` | The `cpu.limit` and `cores` should be kept in sync. The value of `cores` must be an integer and should typically be set by rounding up from the limit. E.g. if cpu.limit is '1500m', cores should be 2. |
| proxy.defaultInboundPolicy | string | "all-unauthenticated" | The default allow policy to use when no `Server` selects a pod. One of: "all-authenticated", "all-unauthenticated", "cluster-authenticated", "cluster-unauthenticated", "deny", "audit" |
| proxy.disableInboundProtocolDetectTimeout | bool | `false` | When set to true, disables the protocol detection timeout on the inbound side of the proxy by setting it to a very high value |
| proxy.disableOutboundProtocolDetectTimeout | bool | `false` | When set to true, disables the protocol detection timeout on the outbound side of the proxy by setting it to a very high value |
| proxy.enableExternalProfiles | bool | `false` | Enable service profiles for non-Kubernetes services |
| proxy.enableShutdownEndpoint | bool | `false` | Enables the proxy's /shutdown admin endpoint |
| proxy.gid | int | `-1` | Optional customisation of the group id under which the proxy runs (the group ID will be omitted if lower than 0) |
| proxy.image.name | string | `"cr.l5d.io/linkerd/proxy"` | Docker image for the proxy |
| proxy.image.pullPolicy | string | imagePullPolicy | Pull policy for the proxy container image |
| proxy.image.version | string | linkerdVersion | Tag for the proxy container image |
| proxy.inbound.server.http2.keepAliveInterval | string | `"10s"` | The interval at which PINGs are issued to remote HTTP/2 clients. |
| proxy.inbound.server.http2.keepAliveTimeout | string | `"3s"` | The timeout within which keep-alive PINGs must be acknowledged on inbound HTTP/2 connections. |
| proxy.inboundConnectTimeout | string | `"100ms"` | Maximum time allowed for the proxy to establish an inbound TCP connection |
| proxy.inboundDiscoveryCacheUnusedTimeout | string | `"90s"` | Maximum time allowed before an unused inbound discovery result is evicted from the cache |
| proxy.livenessProbe | object | `{"initialDelaySeconds":10,"timeoutSeconds":1}` | LivenessProbe timeout and delay configuration |
| proxy.logFormat | string | `"plain"` | Log format (`plain` or `json`) for the proxy |
| proxy.logHTTPHeaders | `off` or `insecure` | `"off"` | If set to `off`, will prevent the proxy from logging HTTP headers. If set to `insecure`, HTTP headers may be logged verbatim. Note that setting this to `insecure` is not alone sufficient to log HTTP headers; the proxy logLevel must also be set to debug. |
| proxy.logLevel | string | `"warn,linkerd=info,hickory=error"` | Log level for the proxy |
| proxy.nativeSidecar | bool | `false` | Enable KEP-753 native sidecars This is an experimental feature. It requires Kubernetes >= 1.29. If enabled, .proxy.waitBeforeExitSeconds should not be used. |
| proxy.opaquePorts | string | `"25,587,3306,4444,5432,6379,9300,11211"` | Default set of opaque ports - SMTP (25,587) server-first - MYSQL (3306) server-first - Galera (4444) server-first - PostgreSQL (5432) server-first - Redis (6379) server-first - ElasticSearch (9300) server-first - Memcached (11211) clients do not issue any preamble, which breaks detection |
| proxy.outbound.server.http2.keepAliveInterval | string | `"10s"` | The interval at which PINGs are issued to local application HTTP/2 clients. |
| proxy.outbound.server.http2.keepAliveTimeout | string | `"3s"` | The timeout within which keep-alive PINGs must be acknowledged on outbound HTTP/2 connections. |
| proxy.outboundConnectTimeout | string | `"1000ms"` | Maximum time allowed for the proxy to establish an outbound TCP connection |
| proxy.outboundDiscoveryCacheUnusedTimeout | string | `"5s"` | Maximum time allowed before an unused outbound discovery result is evicted from the cache |
| proxy.ports.admin | int | `4191` | Admin port for the proxy container |
| proxy.ports.control | int | `4190` | Control port for the proxy container |
| proxy.ports.inbound | int | `4143` | Inbound port for the proxy container |
| proxy.ports.outbound | int | `4140` | Outbound port for the proxy container |
| proxy.readinessProbe | object | `{"initialDelaySeconds":2,"timeoutSeconds":1}` | ReadinessProbe timeout and delay configuration |
| proxy.requireIdentityOnInboundPorts | string | `""` | |
| proxy.resources.cpu.limit | string | `""` | Maximum amount of CPU units that the proxy can use |
| proxy.resources.cpu.request | string | `""` | Amount of CPU units that the proxy requests |
| proxy.resources.ephemeral-storage.limit | string | `""` | Maximum amount of ephemeral storage that the proxy can use |
| proxy.resources.ephemeral-storage.request | string | `""` | Amount of ephemeral storage that the proxy requests |
| proxy.resources.memory.limit | string | `""` | Maximum amount of memory that the proxy can use |
| proxy.resources.memory.request | string | `""` | Maximum amount of memory that the proxy requests |
| proxy.shutdownGracePeriod | string | `""` | Grace period for graceful proxy shutdowns. If this timeout elapses before all open connections have completed, the proxy will terminate forcefully, closing any remaining connections. |
| proxy.startupProbe.failureThreshold | int | `120` | |
| proxy.startupProbe.initialDelaySeconds | int | `0` | |
| proxy.startupProbe.periodSeconds | int | `1` | |
| proxy.uid | int | `2102` | User id under which the proxy runs |
| proxy.waitBeforeExitSeconds | int | `0` | If set the injected proxy sidecars in the data plane will stay alive for at least the given period before receiving the SIGTERM signal from Kubernetes but no longer than the pod's `terminationGracePeriodSeconds`. See [Lifecycle hooks](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks) for more info on container lifecycle hooks. |
| proxyInit.closeWaitTimeoutSecs | int | `0` | |
| proxyInit.ignoreInboundPorts | string | `"4567,4568"` | Default set of inbound ports to skip via iptables - Galera (4567,4568) |
| proxyInit.ignoreOutboundPorts | string | `"4567,4568"` | Default set of outbound ports to skip via iptables - Galera (4567,4568) |
| proxyInit.image.name | string | `"cr.l5d.io/linkerd/proxy-init"` | Docker image for the proxy-init container |
| proxyInit.image.pullPolicy | string | imagePullPolicy | Pull policy for the proxy-init container image |
| proxyInit.image.version | string | `"v2.4.1"` | Tag for the proxy-init container image |
| proxyInit.iptablesMode | string | `"legacy"` | Variant of iptables that will be used to configure routing. Currently, proxy-init can be run either in 'nft' or in 'legacy' mode. The mode will control which utility binary will be called. The host must support whichever mode will be used |
| proxyInit.kubeAPIServerPorts | string | `"443,6443"` | Default set of ports to skip via iptables for control plane components so they can communicate with the Kubernetes API Server |
| proxyInit.logFormat | string | plain | Log format (`plain` or `json`) for the proxy-init |
| proxyInit.logLevel | string | info | Log level for the proxy-init |
| proxyInit.privileged | bool | false | Privileged mode allows the container processes to inherit all security capabilities and bypass any security limitations enforced by the kubelet. When used with 'runAsRoot: true', the container will behave exactly as if it was running as root on the host. May escape cgroup limits and see other processes and devices on the host. |
| proxyInit.runAsGroup | int | `65534` | This value is used only if runAsRoot is false; otherwise runAsGroup will be 0 |
| proxyInit.runAsRoot | bool | `false` | Allow overriding the runAsNonRoot behaviour (<https://github.com/linkerd/linkerd2/issues/7308>) |
| proxyInit.runAsUser | int | `65534` | This value is used only if runAsRoot is false; otherwise runAsUser will be 0 |
| proxyInit.skipSubnets | string | `""` | Comma-separated list of subnets in valid CIDR format that should be skipped by the proxy |
| proxyInit.xtMountPath.mountPath | string | `"/run"` | |
| proxyInit.xtMountPath.name | string | `"linkerd-proxy-init-xtables-lock"` | |
| proxyInjector.caBundle | string | `""` | Bundle of CA certificates for proxy injector. If not provided nor injected with cert-manager, then Helm will use the certificate generated for `proxyInjector.crtPEM`. If `proxyInjector.externalSecret` is set to true, this value, injectCaFrom, or injectCaFromSecret must be set, as no certificate will be generated. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector) for more information. |
| proxyInjector.crtPEM | string | `""` | Certificate for the proxy injector. If not provided and not using an external secret then Helm will generate one. |
| proxyInjector.externalSecret | bool | `false` | Do not create a secret resource for the proxyInjector webhook. If this is set to `true`, the value `proxyInjector.caBundle` must be set or the ca bundle must injected with cert-manager ca injector using `proxyInjector.injectCaFrom` or `proxyInjector.injectCaFromSecret` (see below). |
| proxyInjector.injectCaFrom | string | `""` | Inject the CA bundle from a cert-manager Certificate. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-certificate-resource) for more information. |
| proxyInjector.injectCaFromSecret | string | `""` | Inject the CA bundle from a Secret. If set, the `cert-manager.io/inject-ca-from-secret` annotation will be added to the webhook. The Secret must have the CA Bundle stored in the `ca.crt` key and have the `cert-manager.io/allow-direct-injection` annotation set to `true`. See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-secret-resource) for more information. |
| proxyInjector.keyPEM | string | `""` | Certificate key for the proxy injector. If not provided and not using an external secret then Helm will generate one. |
| proxyInjector.livenessProbe.timeoutSeconds | int | `1` | |
| proxyInjector.namespaceSelector | object | `{"matchExpressions":[{"key":"config.linkerd.io/admission-webhooks","operator":"NotIn","values":["disabled"]},{"key":"kubernetes.io/metadata.name","operator":"NotIn","values":["kube-system","cert-manager"]}]}` | Namespace selector used by admission webhook. |
| proxyInjector.objectSelector | object | `{"matchExpressions":[{"key":"linkerd.io/control-plane-component","operator":"DoesNotExist"},{"key":"linkerd.io/cni-resource","operator":"DoesNotExist"}]}` | Object selector used by admission webhook. |
| proxyInjector.readinessProbe.timeoutSeconds | int | `1` | |
| proxyInjector.timeoutSeconds | int | `10` | Timeout in seconds before the API Server cancels a request to the proxy injector. If the timeout is exceeded, the webhookFailurePolicy is used. |
| revisionHistoryLimit | int | `10` | Specifies the number of old ReplicaSets to retain to allow rollback. |
| runtimeClassName | string | `""` | Runtime Class Name for all the pods |
| spValidator | object | `{"livenessProbe":{"timeoutSeconds":1},"readinessProbe":{"timeoutSeconds":1}}` | SP validator configuration |
| webhookFailurePolicy | string | `"Ignore"` | Failure policy for the proxy injector |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
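To tie a few of the documented values together, a small override file for a more hardened install might look like the following sketch (all keys come from the table above; the concrete numbers are only illustrative):
```yaml
controllerReplicas: 3
controllerLogLevel: debug
enablePodAntiAffinity: true
enablePodDisruptionBudget: true
proxy:
  resources:
    cpu:
      request: 100m
    memory:
      request: 20Mi
      limit: 250Mi
proxyInit:
  iptablesMode: nft
```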


@ -0,0 +1,133 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionBadge" . }}
{{ template "chart.typeBadge" . }}
{{ template "chart.appVersionBadge" . }}
{{ template "chart.homepageLine" . }}
## Quickstart and documentation
You can run Linkerd on any Kubernetes cluster in a matter of seconds. See the
[Linkerd Getting Started Guide][getting-started] for how.
For more comprehensive documentation, start with the [Linkerd
docs][linkerd-docs].
## Prerequisite: linkerd-crds chart
Before installing this chart, please install the `linkerd-crds` chart, which
creates all the CRDs that the components from the current chart require.
## Prerequisite: identity certificates
The identity component of Linkerd requires setting up a trust anchor
certificate, and an issuer certificate with its key. These need to be provided
to Helm by the user (unlike when using the `linkerd install` CLI which can
generate these automatically). You can provide your own, or follow [these
instructions](https://linkerd.io/2/tasks/generate-certificates/) to generate new
ones.
Alternatively, both trust anchor and identity issuer certificates may be
derived from in-cluster resources. Existing CA (trust anchor) certificates
**must** live in a `ConfigMap` resource named `linkerd-identity-trust-roots`.
Issuer certificates **must** live in a `Secret` named
`linkerd-identity-issuer`. Both resources should exist in the control-plane's
install namespace. In order to use an existing CA, Linkerd needs to be
installed with `identity.externalCA=true`. To use an existing issuer
certificate, Linkerd should be installed with
`identity.issuer.scheme=kubernetes.io/tls`.
A more comprehensive description is in the [automatic certificate rotation
guide](https://linkerd.io/2.12/tasks/automatically-rotating-control-plane-tls-credentials/#a-note-on-third-party-cert-management-solutions).
Note that the provided certificates must be ECDSA certificates.
## Adding Linkerd's Helm repository
Included here for completeness' sake; this repository should already have been added
when `linkerd-base` was installed.
```bash
# To add the repo for Linkerd edge releases:
helm repo add linkerd https://helm.linkerd.io/edge
```
## Installing the chart
You must provide the certificates and keys described in the preceding section,
and the same expiration date you used to generate the Issuer certificate.
```bash
helm install linkerd-control-plane -n linkerd \
--set-file identityTrustAnchorsPEM=ca.crt \
--set-file identity.issuer.tls.crtPEM=issuer.crt \
--set-file identity.issuer.tls.keyPEM=issuer.key \
linkerd/linkerd-control-plane
```
Note that this chart must be installed in the same namespace in which you installed
the `linkerd-base` chart.
## Setting High-Availability
Besides the default `values.yaml` file, the chart provides a `values-ha.yaml`
file that overrides some default values so as to set things up for a
high-availability scenario, analogous to the `--ha` option in `linkerd install`.
Values such as a higher number of replicas, higher memory/CPU limits, and
affinities are specified in that file.
You can get hold of `values-ha.yaml` by fetching the chart files:
```bash
helm fetch --untar linkerd/linkerd-control-plane
```
Then use the `-f` flag to provide the override file, for example:
```bash
helm install linkerd-control-plane -n linkerd \
--set-file identityTrustAnchorsPEM=ca.crt \
--set-file identity.issuer.tls.crtPEM=issuer.crt \
--set-file identity.issuer.tls.keyPEM=issuer.key \
-f linkerd2/values-ha.yaml
linkerd/linkerd-control-plane
```
## Get involved
* Check out Linkerd's source code at [GitHub][linkerd2].
* Join Linkerd's [user mailing list][linkerd-users], [developer mailing
list][linkerd-dev], and [announcements mailing list][linkerd-announce].
* Follow [@linkerd][twitter] on Twitter.
* Join the [Linkerd Slack][slack].
[getting-started]: https://linkerd.io/2/getting-started/
[linkerd2]: https://github.com/linkerd/linkerd2
[linkerd-announce]: https://lists.cncf.io/g/cncf-linkerd-announce
[linkerd-dev]: https://lists.cncf.io/g/cncf-linkerd-dev
[linkerd-docs]: https://linkerd.io/2/overview/
[linkerd-users]: https://lists.cncf.io/g/cncf-linkerd-users
[slack]: http://slack.linkerd.io
[twitter]: https://twitter.com/linkerd
## Extensions for Linkerd
The current chart installs the core Linkerd components, which grant you
reliability and security features. Other functionality is available through
extensions. Check the corresponding docs for each one of the following
extensions:
* Observability:
[Linkerd-viz](https://github.com/linkerd/linkerd2/blob/main/viz/charts/linkerd-viz/README.md)
* Multicluster:
[Linkerd-multicluster](https://github.com/linkerd/linkerd2/blob/main/multicluster/charts/linkerd-multicluster/README.md)
* Tracing:
[Linkerd-jaeger](https://github.com/linkerd/linkerd2/blob/main/jaeger/charts/linkerd-jaeger/README.md)
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
{{ template "helm-docs.versionFooter" . }}


@ -0,0 +1,14 @@
# Linkerd 2 Chart
Linkerd is an ultra light, ultra simple, ultra powerful service mesh. Linkerd
adds security, observability, and reliability to Kubernetes, without the
complexity.
This particular Helm chart only installs the control plane core. You will also need to install the
linkerd-crds chart. This chart should be automatically installed along with any other dependencies.
If it is not installed as a dependency, install it first.
To gain access to the observability features, please install the linkerd-viz chart.
Other extensions are available (multicluster, jaeger) under the linkerd Helm repo.
Full documentation available at: https://linkerd.io/2/overview/


@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


@ -0,0 +1,5 @@
apiVersion: v1
description: 'A Helm chart containing Linkerd partial templates, depended by the ''linkerd''
  and ''patch'' charts. '
name: partials
version: 0.1.0


@ -0,0 +1,9 @@
# partials
A Helm chart containing Linkerd partial templates,
depended by the 'linkerd' and 'patch' charts.
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square)
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)


@ -0,0 +1,14 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionBadge" . }}
{{ template "chart.typeBadge" . }}
{{ template "chart.appVersionBadge" . }}
{{ template "chart.homepageLine" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
{{ template "helm-docs.versionFooter" . }}


@ -0,0 +1,38 @@
{{ define "linkerd.pod-affinity" -}}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: {{ default "linkerd.io/control-plane-component" .label }}
operator: In
values:
- {{ .component }}
topologyKey: topology.kubernetes.io/zone
weight: 100
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: {{ default "linkerd.io/control-plane-component" .label }}
operator: In
values:
- {{ .component }}
topologyKey: kubernetes.io/hostname
{{- end }}
{{ define "linkerd.node-affinity" -}}
nodeAffinity:
{{- toYaml .Values.nodeAffinity | trim | nindent 2 }}
{{- end }}
{{ define "linkerd.affinity" -}}
{{- if or .Values.enablePodAntiAffinity .Values.nodeAffinity -}}
affinity:
{{- end }}
{{- if .Values.enablePodAntiAffinity -}}
{{- include "linkerd.pod-affinity" . | nindent 2 }}
{{- end }}
{{- if .Values.nodeAffinity -}}
{{- include "linkerd.node-affinity" . | nindent 2 }}
{{- end }}
{{- end }}
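For illustration, with `.component` set to `destination` and the default label key, the `linkerd.pod-affinity` template above emits YAML along these lines (a sketch reconstructed from the template, so the exact indentation of the real output may differ):
```yaml
podAntiAffinity:
  preferredDuringSchedulingIgnoredDuringExecution:
  - podAffinityTerm:
      labelSelector:
        matchExpressions:
        - key: linkerd.io/control-plane-component
          operator: In
          values:
          - destination
      topologyKey: topology.kubernetes.io/zone
    weight: 100
  requiredDuringSchedulingIgnoredDuringExecution:
  - labelSelector:
      matchExpressions:
      - key: linkerd.io/control-plane-component
        operator: In
        values:
        - destination
    topologyKey: kubernetes.io/hostname
```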


@ -0,0 +1,16 @@
{{- define "partials.proxy.capabilities" -}}
capabilities:
{{- if .Values.proxy.capabilities.add }}
add:
{{- toYaml .Values.proxy.capabilities.add | trim | nindent 4 }}
{{- end }}
{{- if .Values.proxy.capabilities.drop }}
drop:
{{- toYaml .Values.proxy.capabilities.drop | trim | nindent 4 }}
{{- end }}
{{- end -}}
{{- define "partials.proxy-init.capabilities.drop" -}}
drop:
{{ toYaml .Values.proxyInit.capabilities.drop | trim }}
{{- end -}}


@ -0,0 +1,15 @@
{{- define "partials.debug" -}}
image: {{.Values.debugContainer.image.name}}:{{.Values.debugContainer.image.version | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.debugContainer.image.pullPolicy | default .Values.imagePullPolicy}}
name: linkerd-debug
terminationMessagePolicy: FallbackToLogsOnError
# some environments require probes, so we provide some infallible ones
livenessProbe:
exec:
command:
- "true"
readinessProbe:
exec:
command:
- "true"
{{- end -}}


@ -0,0 +1,14 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Splits a comma-separated list into a list of quoted string values.
For example, "11,22,55,44" will become "11","22","55","44".
*/}}
{{- define "partials.splitStringList" -}}
{{- if gt (len (toString .)) 0 -}}
{{- $ports := toString . | splitList "," -}}
{{- $last := sub (len $ports) 1 -}}
{{- range $i,$port := $ports -}}
"{{$port}}"{{ternary "," "" (ne $i $last)}}
{{- end -}}
{{- end -}}
{{- end -}}
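As a worked example of the helper above (a sketch of typical usage; the surrounding template line is hypothetical), feeding a comma-separated port string through it yields a quoted, comma-joined list that can be embedded in a JSON-style array:
```yaml
# template:  ports: [{{ include "partials.splitStringList" "25,587,3306" }}]
# renders as:
ports: ["25","587","3306"]
```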


@ -0,0 +1,17 @@
{{- define "partials.annotations.created-by" -}}
linkerd.io/created-by: {{ .Values.cliVersion | default (printf "linkerd/helm %s" ( (.Values.image).version | default .Values.linkerdVersion)) }}
{{- end -}}
{{- define "partials.proxy.annotations" -}}
linkerd.io/proxy-version: {{.Values.proxy.image.version | default .Values.linkerdVersion}}
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
linkerd.io/trust-root-sha256: {{ .Values.identityTrustAnchorsPEM | sha256sum }}
{{- end -}}
{{/*
To add labels to the control-plane components, instead update at individual component manifests as
adding here would also update `spec.selector.matchLabels` which are immutable and would fail upgrades.
*/}}
{{- define "partials.proxy.labels" -}}
linkerd.io/proxy-{{.workloadKind}}: {{.component}}
{{- end -}}


@ -0,0 +1,45 @@
{{- define "partials.network-validator" -}}
name: linkerd-network-validator
image: {{.Values.proxy.image.name}}:{{.Values.proxy.image.version | default .Values.linkerdVersion }}
imagePullPolicy: {{.Values.proxy.image.pullPolicy | default .Values.imagePullPolicy}}
{{ include "partials.resources" .Values.proxy.resources }}
{{- if or .Values.networkValidator.enableSecurityContext }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
{{- end }}
command:
- /usr/lib/linkerd/linkerd2-network-validator
args:
- --log-format
- {{ .Values.networkValidator.logFormat }}
- --log-level
- {{ .Values.networkValidator.logLevel }}
- --connect-addr
{{- if .Values.networkValidator.connectAddr }}
- {{ .Values.networkValidator.connectAddr | quote }}
{{- else if .Values.disableIPv6}}
- "1.1.1.1:20001"
{{- else }}
- "[fd00::1]:20001"
{{- end }}
- --listen-addr
{{- if .Values.networkValidator.listenAddr }}
- {{ .Values.networkValidator.listenAddr | quote }}
{{- else if .Values.disableIPv6}}
- "0.0.0.0:4140"
{{- else }}
- "[::]:4140"
{{- end }}
- --timeout
- {{ .Values.networkValidator.timeout }}
{{- end -}}
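The defaults baked into the template above can be overridden through the `networkValidator.*` values documented earlier; a sketch that pins the probe addresses explicitly (the addresses simply restate the IPv4 defaults shown above):
```yaml
networkValidator:
  logFormat: plain
  logLevel: debug
  timeout: 10s
  connectAddr: "1.1.1.1:20001"
  listenAddr: "0.0.0.0:4140"
```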


@ -0,0 +1,4 @@
{{- define "linkerd.node-selector" -}}
nodeSelector:
{{- toYaml .Values.nodeSelector | trim | nindent 2 }}
{{- end -}}


@ -0,0 +1,18 @@
{{- define "partials.proxy.config.annotations" -}}
{{- with .cpu }}
{{- with .request -}}
config.linkerd.io/proxy-cpu-request: {{. | quote}}
{{end}}
{{- with .limit -}}
config.linkerd.io/proxy-cpu-limit: {{. | quote}}
{{- end}}
{{- end}}
{{- with .memory }}
{{- with .request }}
config.linkerd.io/proxy-memory-request: {{. | quote}}
{{end}}
{{- with .limit -}}
config.linkerd.io/proxy-memory-limit: {{. | quote}}
{{- end}}
{{- end }}
{{- end }}


@ -0,0 +1,98 @@
{{- define "partials.proxy-init" -}}
args:
{{- if (.Values.proxyInit.iptablesMode | default "legacy" | eq "nft") }}
- --firewall-bin-path
- "iptables-nft"
- --firewall-save-bin-path
- "iptables-nft-save"
{{- else if not (eq .Values.proxyInit.iptablesMode "legacy") }}
{{ fail (printf "Unsupported value \"%s\" for proxyInit.iptablesMode\nValid values: [\"nft\", \"legacy\"]" .Values.proxyInit.iptablesMode) }}
{{end -}}
{{- if .Values.disableIPv6 }}
- --ipv6=false
{{- end }}
- --incoming-proxy-port
- {{.Values.proxy.ports.inbound | quote}}
- --outgoing-proxy-port
- {{.Values.proxy.ports.outbound | quote}}
- --proxy-uid
- {{.Values.proxy.uid | quote}}
{{- if ge (int .Values.proxy.gid) 0 }}
- --proxy-gid
- {{.Values.proxy.gid | quote}}
{{- end }}
- --inbound-ports-to-ignore
- "{{.Values.proxy.ports.control}},{{.Values.proxy.ports.admin}}{{ternary (printf ",%s" (.Values.proxyInit.ignoreInboundPorts | toString)) "" (not (empty .Values.proxyInit.ignoreInboundPorts)) }}"
{{- if .Values.proxyInit.ignoreOutboundPorts }}
- --outbound-ports-to-ignore
- {{.Values.proxyInit.ignoreOutboundPorts | quote}}
{{- end }}
{{- if .Values.proxyInit.closeWaitTimeoutSecs }}
- --timeout-close-wait-secs
- {{ .Values.proxyInit.closeWaitTimeoutSecs | quote}}
{{- end }}
{{- if .Values.proxyInit.logFormat }}
- --log-format
- {{ .Values.proxyInit.logFormat }}
{{- end }}
{{- if .Values.proxyInit.logLevel }}
- --log-level
- {{ .Values.proxyInit.logLevel }}
{{- end }}
{{- if .Values.proxyInit.skipSubnets }}
- --subnets-to-ignore
- {{ .Values.proxyInit.skipSubnets | quote }}
{{- end }}
image: {{.Values.proxyInit.image.name}}:{{.Values.proxyInit.image.version}}
imagePullPolicy: {{.Values.proxyInit.image.pullPolicy | default .Values.imagePullPolicy}}
name: linkerd-init
{{ include "partials.resources" .Values.proxy.resources }}
securityContext:
{{- if or .Values.proxyInit.closeWaitTimeoutSecs .Values.proxyInit.privileged }}
allowPrivilegeEscalation: true
{{- else }}
allowPrivilegeEscalation: false
{{- end }}
capabilities:
add:
- NET_ADMIN
- NET_RAW
{{- if .Values.proxyInit.capabilities -}}
{{- if .Values.proxyInit.capabilities.add }}
{{- toYaml .Values.proxyInit.capabilities.add | trim | nindent 4 }}
{{- end }}
{{- if .Values.proxyInit.capabilities.drop -}}
{{- include "partials.proxy-init.capabilities.drop" . | nindent 4 -}}
{{- end }}
{{- end }}
{{- if or .Values.proxyInit.closeWaitTimeoutSecs .Values.proxyInit.privileged }}
privileged: true
{{- else }}
privileged: false
{{- end }}
{{- if .Values.proxyInit.runAsRoot }}
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
{{- else }}
runAsNonRoot: true
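{{- /* Fall back to UID/GID 65534 when runAsUser/runAsGroup would otherwise resolve to 0, so the init container never runs as root here. */}}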
runAsUser: {{ .Values.proxyInit.runAsUser | int | eq 0 | ternary 65534 .Values.proxyInit.runAsUser }}
runAsGroup: {{ .Values.proxyInit.runAsGroup | int | eq 0 | ternary 65534 .Values.proxyInit.runAsGroup }}
{{- end }}
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
terminationMessagePolicy: FallbackToLogsOnError
{{- if or (not .Values.cniEnabled) .Values.proxyInit.saMountPath }}
volumeMounts:
{{- end -}}
{{- if not .Values.cniEnabled }}
- mountPath: {{.Values.proxyInit.xtMountPath.mountPath}}
name: {{.Values.proxyInit.xtMountPath.name}}
{{- end -}}
{{- if .Values.proxyInit.saMountPath }}
- mountPath: {{.Values.proxyInit.saMountPath.mountPath}}
name: {{.Values.proxyInit.saMountPath.name}}
readOnly: {{.Values.proxyInit.saMountPath.readOnly}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,267 @@
{{ define "partials.proxy" -}}
{{ if and .Values.proxy.nativeSidecar .Values.proxy.waitBeforeExitSeconds }}
{{ fail "proxy.nativeSidecar and waitBeforeExitSeconds cannot be used simultaneously" }}
{{- end }}
{{- if not (has .Values.proxy.logHTTPHeaders (list "insecure" "off" "")) }}
{{- fail "logHTTPHeaders must be one of: insecure | off" }}
{{- end }}
{{- $trustDomain := (.Values.identityTrustDomain | default .Values.clusterDomain) -}}
env:
- name: _pod_name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: _pod_ns
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: _pod_nodeName
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.proxy.cores }}
- name: LINKERD2_PROXY_CORES
value: {{.Values.proxy.cores | quote}}
{{- end }}
{{ if .Values.proxy.requireIdentityOnInboundPorts -}}
- name: LINKERD2_PROXY_INBOUND_PORTS_REQUIRE_IDENTITY
value: {{.Values.proxy.requireIdentityOnInboundPorts | quote}}
{{ end -}}
{{ if .Values.proxy.requireTLSOnInboundPorts -}}
- name: LINKERD2_PROXY_INBOUND_PORTS_REQUIRE_TLS
value: {{.Values.proxy.requireTLSOnInboundPorts | quote}}
{{ end -}}
- name: LINKERD2_PROXY_SHUTDOWN_ENDPOINT_ENABLED
value: {{.Values.proxy.enableShutdownEndpoint | quote}}
- name: LINKERD2_PROXY_LOG
value: "{{.Values.proxy.logLevel}}{{ if not (eq .Values.proxy.logHTTPHeaders "insecure") }},[{headers}]=off,[{request}]=off{{ end }}"
- name: LINKERD2_PROXY_LOG_FORMAT
value: {{.Values.proxy.logFormat | quote}}
- name: LINKERD2_PROXY_DESTINATION_SVC_ADDR
value: {{ternary "localhost.:8086" (printf "linkerd-dst-headless.%s.svc.%s.:8086" .Release.Namespace .Values.clusterDomain) (eq (toString .Values.proxy.component) "linkerd-destination")}}
- name: LINKERD2_PROXY_DESTINATION_PROFILE_NETWORKS
value: {{.Values.clusterNetworks | quote}}
- name: LINKERD2_PROXY_POLICY_SVC_ADDR
value: {{ternary "localhost.:8090" (printf "linkerd-policy.%s.svc.%s.:8090" .Release.Namespace .Values.clusterDomain) (eq (toString .Values.proxy.component) "linkerd-destination")}}
- name: LINKERD2_PROXY_POLICY_WORKLOAD
value: |
{"ns":"$(_pod_ns)", "pod":"$(_pod_name)"}
- name: LINKERD2_PROXY_INBOUND_DEFAULT_POLICY
value: {{.Values.proxy.defaultInboundPolicy}}
- name: LINKERD2_PROXY_POLICY_CLUSTER_NETWORKS
value: {{.Values.clusterNetworks | quote}}
- name: LINKERD2_PROXY_CONTROL_STREAM_INITIAL_TIMEOUT
value: {{((.Values.proxy.control).streams).initialTimeout | default "" | quote}}
- name: LINKERD2_PROXY_CONTROL_STREAM_IDLE_TIMEOUT
value: {{((.Values.proxy.control).streams).idleTimeout | default "" | quote}}
- name: LINKERD2_PROXY_CONTROL_STREAM_LIFETIME
value: {{((.Values.proxy.control).streams).lifetime | default "" | quote}}
{{ if .Values.proxy.inboundConnectTimeout -}}
- name: LINKERD2_PROXY_INBOUND_CONNECT_TIMEOUT
value: {{.Values.proxy.inboundConnectTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.outboundConnectTimeout -}}
- name: LINKERD2_PROXY_OUTBOUND_CONNECT_TIMEOUT
value: {{.Values.proxy.outboundConnectTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.outboundDiscoveryCacheUnusedTimeout -}}
- name: LINKERD2_PROXY_OUTBOUND_DISCOVERY_IDLE_TIMEOUT
value: {{.Values.proxy.outboundDiscoveryCacheUnusedTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.inboundDiscoveryCacheUnusedTimeout -}}
- name: LINKERD2_PROXY_INBOUND_DISCOVERY_IDLE_TIMEOUT
value: {{.Values.proxy.inboundDiscoveryCacheUnusedTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.disableOutboundProtocolDetectTimeout -}}
- name: LINKERD2_PROXY_OUTBOUND_DETECT_TIMEOUT
value: "365d"
{{ end -}}
{{ if .Values.proxy.disableInboundProtocolDetectTimeout -}}
- name: LINKERD2_PROXY_INBOUND_DETECT_TIMEOUT
value: "365d"
{{ end -}}
- name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR
value: "{{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:{{.Values.proxy.ports.control}}"
- name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR
value: "{{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:{{.Values.proxy.ports.admin}}"
{{- /* Deprecated, superseded by LINKERD2_PROXY_OUTBOUND_LISTEN_ADDRS since proxy's v2.228.0 (deployed since edge-24.4.5) */}}
- name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR
value: "127.0.0.1:{{.Values.proxy.ports.outbound}}"
- name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDRS
value: "127.0.0.1:{{.Values.proxy.ports.outbound}}{{ if not .Values.disableIPv6}},[::1]:{{.Values.proxy.ports.outbound}}{{ end }}"
- name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR
value: "{{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:{{.Values.proxy.ports.inbound}}"
- name: LINKERD2_PROXY_INBOUND_IPS
valueFrom:
fieldRef:
fieldPath: status.podIPs
- name: LINKERD2_PROXY_INBOUND_PORTS
value: {{ .Values.proxy.podInboundPorts | quote }}
{{ if .Values.proxy.isGateway -}}
- name: LINKERD2_PROXY_INBOUND_GATEWAY_SUFFIXES
value: {{printf "svc.%s." .Values.clusterDomain}}
{{ end -}}
{{ if .Values.proxy.isIngress -}}
- name: LINKERD2_PROXY_INGRESS_MODE
value: "true"
{{ end -}}
- name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES
{{- $internalDomain := printf "svc.%s." .Values.clusterDomain }}
value: {{ternary "." $internalDomain .Values.proxy.enableExternalProfiles}}
- name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE
value: 10000ms
- name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE
value: 10000ms
{{- /* Configure inbound and outbound parameters, e.g. for HTTP/2 servers. */}}
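{{- /*
Each nested values key is snake-cased and upper-cased, so (as an illustrative, assumed example) a value at
proxy.inbound.server.<proto>.keepAliveInterval would render as
LINKERD2_PROXY_INBOUND_SERVER_<PROTO>_KEEP_ALIVE_INTERVAL with the configured value quoted.
*/}}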
{{ range $proxyK, $proxyV := (dict "inbound" .Values.proxy.inbound "outbound" .Values.proxy.outbound) -}}
{{ range $scopeK, $scopeV := $proxyV -}}
{{ range $protoK, $protoV := $scopeV -}}
{{ range $paramK, $paramV := $protoV -}}
- name: LINKERD2_PROXY_{{snakecase $proxyK | upper}}_{{snakecase $scopeK | upper}}_{{snakecase $protoK | upper}}_{{snakecase $paramK | upper}}
value: {{ quote $paramV }}
{{ end -}}
{{ end -}}
{{ end -}}
{{ end -}}
{{ if .Values.proxy.opaquePorts -}}
- name: LINKERD2_PROXY_INBOUND_PORTS_DISABLE_PROTOCOL_DETECTION
value: {{.Values.proxy.opaquePorts | quote}}
{{ end -}}
- name: LINKERD2_PROXY_DESTINATION_CONTEXT
value: |
{"ns":"$(_pod_ns)", "nodeName":"$(_pod_nodeName)", "pod":"$(_pod_name)"}
- name: _pod_sa
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: _l5d_ns
value: {{.Release.Namespace}}
- name: _l5d_trustdomain
value: {{$trustDomain}}
- name: LINKERD2_PROXY_IDENTITY_DIR
value: /var/run/linkerd/identity/end-entity
- name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS
{{- /*
Pods in the `linkerd` namespace are not injected by the proxy injector and instead obtain
the trust anchor bundle from the `linkerd-identity-trust-roots` configmap. This should not
be used in other contexts.
*/}}
{{- if .Values.proxy.loadTrustBundleFromConfigMap }}
valueFrom:
configMapKeyRef:
name: linkerd-identity-trust-roots
key: ca-bundle.crt
{{ else }}
value: |
{{- required "Please provide the identity trust anchors" .Values.identityTrustAnchorsPEM | trim | nindent 4 }}
{{ end -}}
- name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE
{{- if .Values.identity.serviceAccountTokenProjection }}
value: /var/run/secrets/tokens/linkerd-identity-token
{{ else }}
value: /var/run/secrets/kubernetes.io/serviceaccount/token
{{ end -}}
- name: LINKERD2_PROXY_IDENTITY_SVC_ADDR
value: {{ternary "localhost.:8080" (printf "linkerd-identity-headless.%s.svc.%s.:8080" .Release.Namespace .Values.clusterDomain) (eq (toString .Values.proxy.component) "linkerd-identity")}}
- name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME
value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
- name: LINKERD2_PROXY_IDENTITY_SVC_NAME
value: linkerd-identity.{{.Release.Namespace}}.serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
- name: LINKERD2_PROXY_DESTINATION_SVC_NAME
value: linkerd-destination.{{.Release.Namespace}}.serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
- name: LINKERD2_PROXY_POLICY_SVC_NAME
value: linkerd-destination.{{.Release.Namespace}}.serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
{{ if .Values.proxy.accessLog -}}
- name: LINKERD2_PROXY_ACCESS_LOG
value: {{.Values.proxy.accessLog | quote}}
{{ end -}}
{{ if .Values.proxy.shutdownGracePeriod -}}
- name: LINKERD2_PROXY_SHUTDOWN_GRACE_PERIOD
value: {{.Values.proxy.shutdownGracePeriod | quote}}
{{ end -}}
{{ if .Values.proxy.additionalEnv -}}
{{ toYaml .Values.proxy.additionalEnv }}
{{ end -}}
{{ if .Values.proxy.experimentalEnv -}}
{{ toYaml .Values.proxy.experimentalEnv }}
{{ end -}}
image: {{.Values.proxy.image.name}}:{{.Values.proxy.image.version | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.proxy.image.pullPolicy | default .Values.imagePullPolicy}}
livenessProbe:
httpGet:
path: /live
port: {{.Values.proxy.ports.admin}}
initialDelaySeconds: {{.Values.proxy.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{.Values.proxy.livenessProbe.timeoutSeconds }}
name: linkerd-proxy
ports:
- containerPort: {{.Values.proxy.ports.inbound}}
name: linkerd-proxy
- containerPort: {{.Values.proxy.ports.admin}}
name: linkerd-admin
readinessProbe:
httpGet:
path: /ready
port: {{.Values.proxy.ports.admin}}
initialDelaySeconds: {{.Values.proxy.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{.Values.proxy.readinessProbe.timeoutSeconds }}
{{- if and .Values.proxy.nativeSidecar .Values.proxy.await }}
startupProbe:
httpGet:
path: /ready
port: {{.Values.proxy.ports.admin}}
initialDelaySeconds: {{.Values.proxy.startupProbe.initialDelaySeconds}}
periodSeconds: {{.Values.proxy.startupProbe.periodSeconds}}
failureThreshold: {{.Values.proxy.startupProbe.failureThreshold}}
{{- end }}
{{- if .Values.proxy.resources }}
{{ include "partials.resources" .Values.proxy.resources }}
{{- end }}
securityContext:
allowPrivilegeEscalation: false
{{- if .Values.proxy.capabilities -}}
{{- include "partials.proxy.capabilities" . | nindent 2 -}}
{{- end }}
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.proxy.uid}}
{{- if ge (int .Values.proxy.gid) 0 }}
runAsGroup: {{.Values.proxy.gid}}
{{- end }}
seccompProfile:
type: RuntimeDefault
terminationMessagePolicy: FallbackToLogsOnError
{{- if and (not .Values.proxy.nativeSidecar) (or .Values.proxy.await .Values.proxy.waitBeforeExitSeconds) }}
lifecycle:
{{- if .Values.proxy.await }}
postStart:
exec:
command:
- /usr/lib/linkerd/linkerd-await
- --timeout=2m
- --port={{.Values.proxy.ports.admin}}
{{- end }}
{{- if .Values.proxy.waitBeforeExitSeconds }}
preStop:
exec:
command:
- /bin/sleep
- {{.Values.proxy.waitBeforeExitSeconds | quote}}
{{- end }}
{{- end }}
volumeMounts:
- mountPath: /var/run/linkerd/identity/end-entity
name: linkerd-identity-end-entity
{{- if .Values.identity.serviceAccountTokenProjection }}
- mountPath: /var/run/secrets/tokens
name: linkerd-identity-token
{{- end }}
{{- if .Values.proxy.saMountPath }}
- mountPath: {{.Values.proxy.saMountPath.mountPath}}
name: {{.Values.proxy.saMountPath.name}}
readOnly: {{.Values.proxy.saMountPath.readOnly}}
{{- end -}}
{{- if .Values.proxy.nativeSidecar }}
restartPolicy: Always
{{- end -}}
{{- end }}

View File

@ -0,0 +1,6 @@
{{- define "partials.image-pull-secrets"}}
{{- if . }}
imagePullSecrets:
{{ toYaml . | indent 2 }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,28 @@
{{- define "partials.resources" -}}
{{- $ephemeralStorage := index . "ephemeral-storage" -}}
resources:
{{- if or (.cpu).limit (.memory).limit ($ephemeralStorage).limit }}
limits:
{{- with (.cpu).limit }}
cpu: {{. | quote}}
{{- end }}
{{- with (.memory).limit }}
memory: {{. | quote}}
{{- end }}
{{- with ($ephemeralStorage).limit }}
ephemeral-storage: {{. | quote}}
{{- end }}
{{- end }}
{{- if or (.cpu).request (.memory).request ($ephemeralStorage).request }}
requests:
{{- with (.cpu).request }}
cpu: {{. | quote}}
{{- end }}
{{- with (.memory).request }}
memory: {{. | quote}}
{{- end }}
{{- with ($ephemeralStorage).request }}
ephemeral-storage: {{. | quote}}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,4 @@
{{- define "linkerd.tolerations" -}}
tolerations:
{{ toYaml .Values.tolerations | trim | indent 2 }}
{{- end -}}

View File

@ -0,0 +1,5 @@
{{ define "partials.linkerd.trace" -}}
{{ if .Values.controlPlaneTracing -}}
- -trace-collector=collector.{{.Values.controlPlaneTracingNamespace}}.svc.{{.Values.clusterDomain}}:55678
{{ end -}}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- define "linkerd.webhook.validation" -}}
{{- if and (.injectCaFrom) (.injectCaFromSecret) -}}
{{- fail "injectCaFrom and injectCaFromSecret cannot both be set" -}}
{{- end -}}
{{- if and (or (.injectCaFrom) (.injectCaFromSecret)) (.caBundle) -}}
{{- fail "injectCaFrom or injectCaFromSecret cannot be set if providing a caBundle" -}}
{{- end -}}
{{- if and (.externalSecret) (empty .caBundle) (empty .injectCaFrom) (empty .injectCaFromSecret) -}}
{{- fail "if externalSecret is set, then caBundle, injectCaFrom, or injectCaFromSecret must be set" -}}
{{- end }}
{{- if and (or .injectCaFrom .injectCaFromSecret .caBundle) (not .externalSecret) -}}
{{- fail "if caBundle, injectCaFrom, or injectCaFromSecret is set, then externalSecret must be set" -}}
{{- end -}}
{{- end -}}
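{{- /*
Illustrative valid combination (the certificate name is an assumption): an externally managed webhook
certificate with cert-manager CA injection, e.g.
  profileValidator:
    externalSecret: true
    injectCaFrom: linkerd/linkerd-sp-validator
*/}}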

View File

@ -0,0 +1,20 @@
{{ define "partials.proxy.volumes.identity" -}}
emptyDir:
medium: Memory
name: linkerd-identity-end-entity
{{- end -}}
{{ define "partials.proxyInit.volumes.xtables" -}}
emptyDir: {}
name: {{ .Values.proxyInit.xtMountPath.name }}
{{- end -}}
{{- define "partials.proxy.volumes.service-account-token" -}}
name: linkerd-identity-token
projected:
sources:
- serviceAccountToken:
path: linkerd-identity-token
expirationSeconds: 86400 {{- /* # 24 hours */}}
audience: identity.l5d.io
{{- end -}}

View File

@ -0,0 +1,19 @@
questions:
- variable: identityTrustAnchorsPEM
label: "Trust root certificate (ECDSA)"
description: "Root certificate used to support mTLS connections between meshed pods"
required: true
type: multiline
group: Identity
- variable: identity.issuer.tls.crtPEM
label: "Issuer certificate (ECDSA)"
description: "Intermediate certificate, rooted on identityTrustAnchorsPEM, used to sign the Linkerd proxies' CSR"
required: true
type: multiline
group: Identity
- variable: identity.issuer.tls.keyPEM
label: "Key for the issuer certificate (ECDSA)"
description: "Private key for the certificate entered on crtPEM"
required: true
type: multiline
group: Identity

View File

@ -0,0 +1,19 @@
The Linkerd control plane was successfully installed 🎉
To help you manage your Linkerd service mesh, you can install the Linkerd CLI by running:
curl -sL https://run.linkerd.io/install | sh
Alternatively, you can download the CLI directly via the Linkerd releases page:
https://github.com/linkerd/linkerd2/releases/
To make sure everything works as expected, run the following:
linkerd check
The viz extension can be installed by running:
helm install linkerd-viz linkerd/linkerd-viz
Looking for more? Visit https://linkerd.io/2/getting-started/

View File

@ -0,0 +1,16 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
name: ext-namespace-metadata-linkerd-config
namespace: {{ .Release.Namespace }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
resourceNames: ["linkerd-config"]

View File

@ -0,0 +1,39 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: linkerd-config
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: controller
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
data:
linkerd-crds-chart-version: linkerd-crds-1.0.0-edge
values: |
{{- $values := deepCopy .Values }}
{{- /*
WARNING! All sensitive or private data such as TLS keys must be removed
here to avoid it being publicly readable.
*/ -}}
{{- if kindIs "map" $values.identity.issuer.tls -}}
{{- $_ := unset $values.identity.issuer.tls "keyPEM"}}
{{- end -}}
{{- if kindIs "map" $values.profileValidator -}}
{{- $_ := unset $values.profileValidator "keyPEM"}}
{{- end -}}
{{- if kindIs "map" $values.proxyInjector -}}
{{- $_ := unset $values.proxyInjector "keyPEM"}}
{{- end -}}
{{- if kindIs "map" $values.policyValidator -}}
{{- $_ := unset $values.policyValidator "keyPEM"}}
{{- end -}}
{{- if (empty $values.identityTrustDomain) -}}
{{- $_ := set $values "identityTrustDomain" $values.clusterDomain}}
{{- end -}}
{{- $_ := unset $values "partials"}}
{{- $_ := unset $values "configs"}}
{{- $_ := unset $values "stage"}}
{{- toYaml $values | trim | nindent 4 }}

View File

@ -0,0 +1,327 @@
---
###
### Destination Controller Service
###
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-{{.Release.Namespace}}-destination
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["list", "get", "watch"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["list", "get", "watch"]
- apiGroups: [""]
resources: ["pods", "endpoints", "services", "nodes"]
verbs: ["list", "get", "watch"]
- apiGroups: ["linkerd.io"]
resources: ["serviceprofiles"]
verbs: ["list", "get", "watch"]
- apiGroups: ["workload.linkerd.io"]
resources: ["externalworkloads"]
verbs: ["list", "get", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "get", "update", "patch"]
{{- if .Values.enableEndpointSlices }}
- apiGroups: ["discovery.k8s.io"]
resources: ["endpointslices"]
verbs: ["list", "get", "watch", "create", "update", "patch", "delete"]
{{- end }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-{{.Release.Namespace}}-destination
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-{{.Release.Namespace}}-destination
subjects:
- kind: ServiceAccount
name: linkerd-destination
namespace: {{.Release.Namespace}}
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-destination
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- include "partials.image-pull-secrets" .Values.imagePullSecrets }}
---
{{- $host := printf "linkerd-sp-validator.%s.svc" .Release.Namespace }}
{{- $ca := genSelfSignedCert $host (list) (list $host) 365 }}
{{- if (not .Values.profileValidator.externalSecret) }}
kind: Secret
apiVersion: v1
metadata:
name: linkerd-sp-validator-k8s-tls
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
type: kubernetes.io/tls
data:
tls.crt: {{ ternary (b64enc (trim $ca.Cert)) (b64enc (trim .Values.profileValidator.crtPEM)) (empty .Values.profileValidator.crtPEM) }}
tls.key: {{ ternary (b64enc (trim $ca.Key)) (b64enc (trim .Values.profileValidator.keyPEM)) (empty .Values.profileValidator.keyPEM) }}
---
{{- end }}
{{- include "linkerd.webhook.validation" .Values.profileValidator }}
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: linkerd-sp-validator-webhook-config
{{- if or (.Values.profileValidator.injectCaFrom) (.Values.profileValidator.injectCaFromSecret) }}
annotations:
{{- if .Values.profileValidator.injectCaFrom }}
cert-manager.io/inject-ca-from: {{ .Values.profileValidator.injectCaFrom }}
{{- end }}
{{- if .Values.profileValidator.injectCaFromSecret }}
cert-manager.io/inject-ca-from-secret: {{ .Values.profileValidator.injectCaFromSecret }}
{{- end }}
{{- end }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
webhooks:
- name: linkerd-sp-validator.linkerd.io
namespaceSelector:
{{- toYaml .Values.profileValidator.namespaceSelector | trim | nindent 4 }}
clientConfig:
service:
name: linkerd-sp-validator
namespace: {{ .Release.Namespace }}
path: "/"
{{- if and (empty .Values.profileValidator.injectCaFrom) (empty .Values.profileValidator.injectCaFromSecret) }}
caBundle: {{ ternary (b64enc (trim $ca.Cert)) (b64enc (trim .Values.profileValidator.caBundle)) (empty .Values.profileValidator.caBundle) }}
{{- end }}
failurePolicy: {{.Values.webhookFailurePolicy}}
admissionReviewVersions: ["v1", "v1beta1"]
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["linkerd.io"]
apiVersions: ["v1alpha1", "v1alpha2"]
resources: ["serviceprofiles"]
sideEffects: None
---
{{- $host := printf "linkerd-policy-validator.%s.svc" .Release.Namespace }}
{{- $ca := genSelfSignedCert $host (list) (list $host) 365 }}
{{- if (not .Values.policyValidator.externalSecret) }}
kind: Secret
apiVersion: v1
metadata:
name: linkerd-policy-validator-k8s-tls
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
type: kubernetes.io/tls
data:
tls.crt: {{ ternary (b64enc (trim $ca.Cert)) (b64enc (trim .Values.policyValidator.crtPEM)) (empty .Values.policyValidator.crtPEM) }}
tls.key: {{ ternary (b64enc (trim $ca.Key)) (b64enc (trim .Values.policyValidator.keyPEM)) (empty .Values.policyValidator.keyPEM) }}
---
{{- end }}
{{- include "linkerd.webhook.validation" .Values.policyValidator }}
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: linkerd-policy-validator-webhook-config
{{- if or (.Values.policyValidator.injectCaFrom) (.Values.policyValidator.injectCaFromSecret) }}
annotations:
{{- if .Values.policyValidator.injectCaFrom }}
cert-manager.io/inject-ca-from: {{ .Values.policyValidator.injectCaFrom }}
{{- end }}
{{- if .Values.policyValidator.injectCaFromSecret }}
cert-manager.io/inject-ca-from-secret: {{ .Values.policyValidator.injectCaFromSecret }}
{{- end }}
{{- end }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
webhooks:
- name: linkerd-policy-validator.linkerd.io
namespaceSelector:
{{- toYaml .Values.policyValidator.namespaceSelector | trim | nindent 4 }}
clientConfig:
service:
name: linkerd-policy-validator
namespace: {{ .Release.Namespace }}
path: "/"
{{- if and (empty .Values.policyValidator.injectCaFrom) (empty .Values.policyValidator.injectCaFromSecret) }}
caBundle: {{ ternary (b64enc (trim $ca.Cert)) (b64enc (trim .Values.policyValidator.caBundle)) (empty .Values.policyValidator.caBundle) }}
{{- end }}
failurePolicy: {{.Values.webhookFailurePolicy}}
admissionReviewVersions: ["v1", "v1beta1"]
rules:
- operations: ["CREATE", "UPDATE"]
apiGroups: ["policy.linkerd.io"]
apiVersions: ["*"]
resources:
- authorizationpolicies
- httproutes
- networkauthentications
- meshtlsauthentications
- serverauthorizations
- servers
- operations: ["CREATE", "UPDATE"]
apiGroups: ["gateway.networking.k8s.io"]
apiVersions: ["*"]
resources:
- httproutes
- grpcroutes
sideEffects: None
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: linkerd-policy
labels:
app.kubernetes.io/part-of: Linkerd
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- deployments
verbs:
- get
- apiGroups:
- policy.linkerd.io
resources:
- authorizationpolicies
- httproutes
- meshtlsauthentications
- networkauthentications
- servers
- serverauthorizations
verbs:
- get
- list
- watch
- apiGroups:
- gateway.networking.k8s.io
resources:
- httproutes
- grpcroutes
verbs:
- get
- list
- watch
- apiGroups:
- policy.linkerd.io
resources:
- httproutes/status
verbs:
- patch
- apiGroups:
- gateway.networking.k8s.io
resources:
- httproutes/status
- grpcroutes/status
verbs:
- patch
- apiGroups:
- workload.linkerd.io
resources:
- externalworkloads
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: linkerd-destination-policy
labels:
app.kubernetes.io/part-of: Linkerd
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-policy
subjects:
- kind: ServiceAccount
name: linkerd-destination
namespace: {{.Release.Namespace}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: remote-discovery
namespace: {{.Release.Namespace}}
labels:
app.kubernetes.io/part-of: Linkerd
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: linkerd-destination-remote-discovery
namespace: {{.Release.Namespace}}
labels:
app.kubernetes.io/part-of: Linkerd
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: remote-discovery
subjects:
- kind: ServiceAccount
name: linkerd-destination
namespace: {{.Release.Namespace}}

View File

@ -0,0 +1,435 @@
---
###
### Destination Controller Service
###
kind: Service
apiVersion: v1
metadata:
name: linkerd-dst
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: destination
ports:
- name: grpc
port: 8086
targetPort: 8086
---
kind: Service
apiVersion: v1
metadata:
name: linkerd-dst-headless
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
clusterIP: None
selector:
linkerd.io/control-plane-component: destination
ports:
- name: grpc
port: 8086
targetPort: 8086
---
kind: Service
apiVersion: v1
metadata:
name: linkerd-sp-validator
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: destination
ports:
- name: sp-validator
port: 443
targetPort: sp-validator
---
kind: Service
apiVersion: v1
metadata:
name: linkerd-policy
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
clusterIP: None
selector:
linkerd.io/control-plane-component: destination
ports:
- name: grpc
port: 8090
targetPort: 8090
---
kind: Service
apiVersion: v1
metadata:
name: linkerd-policy-validator
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: destination
ports:
- name: policy-https
port: 443
targetPort: policy-https
{{- if .Values.enablePodDisruptionBudget }}
---
kind: PodDisruptionBudget
apiVersion: policy/v1
metadata:
name: linkerd-dst
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
selector:
matchLabels:
linkerd.io/control-plane-component: destination
{{- end }}
---
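{{- /* Render the proxy on a deep copy of the root context so the per-component overrides set below do not leak into other templates. */}}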
{{- $tree := deepCopy . }}
{{ $_ := set $tree.Values.proxy "workloadKind" "deployment" -}}
{{ $_ := set $tree.Values.proxy "component" "linkerd-destination" -}}
{{ $_ := set $tree.Values.proxy "waitBeforeExitSeconds" 0 -}}
{{- if not (empty .Values.destinationProxyResources) }}
{{- $c := dig "cores" .Values.proxy.cores .Values.destinationProxyResources }}
{{- $_ := set $tree.Values.proxy "cores" $c }}
{{- $r := merge .Values.destinationProxyResources .Values.proxy.resources }}
{{- $_ := set $tree.Values.proxy "resources" $r }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
app.kubernetes.io/name: destination
app.kubernetes.io/part-of: Linkerd
app.kubernetes.io/version: {{.Values.linkerdVersion}}
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
name: linkerd-destination
namespace: {{ .Release.Namespace }}
spec:
replicas: {{.Values.controllerReplicas}}
revisionHistoryLimit: {{.Values.revisionHistoryLimit}}
selector:
matchLabels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- include "partials.proxy.labels" $tree.Values.proxy | nindent 6}}
{{- if .Values.deploymentStrategy }}
strategy:
{{- with .Values.deploymentStrategy }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- end }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/destination-rbac.yaml") . | sha256sum }}
{{ include "partials.annotations.created-by" . }}
{{- include "partials.proxy.annotations" . | nindent 8}}
{{- with .Values.podAnnotations }}{{ toYaml . | trim | nindent 8 }}{{- end }}
config.linkerd.io/default-inbound-policy: "all-unauthenticated"
labels:
linkerd.io/control-plane-component: destination
linkerd.io/control-plane-ns: {{.Release.Namespace}}
linkerd.io/workload-ns: {{.Release.Namespace}}
{{- include "partials.proxy.labels" $tree.Values.proxy | nindent 8}}
{{- with .Values.podLabels }}{{ toYaml . | trim | nindent 8 }}{{- end }}
spec:
{{- with .Values.runtimeClassName }}
runtimeClassName: {{ . | quote }}
{{- end }}
{{- if .Values.tolerations -}}
{{- include "linkerd.tolerations" . | nindent 6 }}
{{- end -}}
{{- include "linkerd.node-selector" . | nindent 6 }}
{{- $_ := set $tree "component" "destination" -}}
{{- include "linkerd.affinity" $tree | nindent 6 }}
containers:
{{- $_ := set $tree.Values.proxy "await" $tree.Values.proxy.await }}
{{- $_ := set $tree.Values.proxy "loadTrustBundleFromConfigMap" true }}
{{- $_ := set $tree.Values.proxy "podInboundPorts" "8086,8090,8443,9443,9990,9996,9997" }}
{{- $_ := set $tree.Values.proxy "outboundDiscoveryCacheUnusedTimeout" "5s" }}
{{- $_ := set $tree.Values.proxy "inboundDiscoveryCacheUnusedTimeout" "90s" }}
{{- /*
The pod needs to accept webhook traffic, and we can't rely on that originating in the
cluster network.
*/}}
{{- $_ := set $tree.Values.proxy "defaultInboundPolicy" "all-unauthenticated" }}
{{- $_ := set $tree.Values.proxy "capabilities" (dict "drop" (list "ALL")) }}
{{- if not $tree.Values.proxy.nativeSidecar }}
- {{- include "partials.proxy" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{- end }}
- args:
- destination
- -addr=:8086
- -controller-namespace={{.Release.Namespace}}
- -enable-h2-upgrade={{.Values.enableH2Upgrade}}
- -log-level={{.Values.controllerLogLevel}}
- -log-format={{.Values.controllerLogFormat}}
- -enable-endpoint-slices={{.Values.enableEndpointSlices}}
- -cluster-domain={{.Values.clusterDomain}}
- -identity-trust-domain={{.Values.identityTrustDomain | default .Values.clusterDomain}}
- -default-opaque-ports={{.Values.proxy.opaquePorts}}
- -enable-ipv6={{not .Values.disableIPv6}}
- -enable-pprof={{.Values.enablePprof | default false}}
{{- if (.Values.destinationController).meshedHttp2ClientProtobuf }}
- --meshed-http2-client-params={{ toJson .Values.destinationController.meshedHttp2ClientProtobuf }}
{{- end }}
{{- range (.Values.destinationController).additionalArgs }}
- {{ . }}
{{- end }}
{{- range (.Values.destinationController).experimentalArgs }}
- {{ . }}
{{- end }}
{{- if or (.Values.destinationController).additionalEnv (.Values.destinationController).experimentalEnv }}
env:
{{- with (.Values.destinationController).additionalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
{{- with (.Values.destinationController).experimentalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
{{- end }}
{{- include "partials.linkerd.trace" . | nindent 8 -}}
image: {{.Values.controllerImage}}:{{.Values.controllerImageVersion | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.imagePullPolicy}}
livenessProbe:
httpGet:
path: /ping
port: 9996
initialDelaySeconds: 10
{{- with (.Values.destinationController.livenessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
name: destination
ports:
- containerPort: 8086
name: grpc
- containerPort: 9996
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9996
{{- with (.Values.destinationController.readinessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
{{- if .Values.destinationResources -}}
{{- include "partials.resources" .Values.destinationResources | nindent 8 }}
{{- end }}
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.controllerUID}}
{{- if ge (int .Values.controllerGID) 0 }}
runAsGroup: {{.Values.controllerGID}}
{{- end }}
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
- args:
- sp-validator
- -log-level={{.Values.controllerLogLevel}}
- -log-format={{.Values.controllerLogFormat}}
- -enable-pprof={{.Values.enablePprof | default false}}
{{- if or (.Values.spValidator).additionalEnv (.Values.spValidator).experimentalEnv }}
env:
{{- with (.Values.spValidator).additionalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
{{- with (.Values.spValidator).experimentalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
{{- end }}
image: {{.Values.controllerImage}}:{{.Values.controllerImageVersion | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.imagePullPolicy}}
livenessProbe:
httpGet:
path: /ping
port: 9997
initialDelaySeconds: 10
{{- with ((.Values.spValidator).livenessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
name: sp-validator
ports:
- containerPort: 8443
name: sp-validator
- containerPort: 9997
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9997
{{- with ((.Values.spValidator).readinessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
{{- if .Values.spValidatorResources -}}
{{- include "partials.resources" .Values.spValidatorResources | nindent 8 }}
{{- end }}
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.controllerUID}}
{{- if ge (int .Values.controllerGID) 0 }}
runAsGroup: {{.Values.controllerGID}}
{{- end }}
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /var/run/linkerd/tls
name: sp-tls
readOnly: true
- args:
- --admin-addr={{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:9990
- --control-plane-namespace={{.Release.Namespace}}
- --grpc-addr={{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:8090
- --server-addr={{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:9443
- --server-tls-key=/var/run/linkerd/tls/tls.key
- --server-tls-certs=/var/run/linkerd/tls/tls.crt
- --cluster-networks={{.Values.clusterNetworks}}
- --identity-domain={{.Values.identityTrustDomain | default .Values.clusterDomain}}
- --cluster-domain={{.Values.clusterDomain}}
- --default-policy={{.Values.proxy.defaultInboundPolicy}}
- --log-level={{.Values.policyController.logLevel | default "linkerd=info,warn"}}
- --log-format={{.Values.controllerLogFormat}}
- --default-opaque-ports={{.Values.proxy.opaquePorts}}
{{- if .Values.policyController.probeNetworks }}
- --probe-networks={{.Values.policyController.probeNetworks | join ","}}
{{- end}}
{{- range .Values.policyController.additionalArgs }}
- {{ . }}
{{- end }}
{{- range .Values.policyController.experimentalArgs }}
- {{ . }}
{{- end }}
image: {{.Values.policyController.image.name}}:{{.Values.policyController.image.version | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.policyController.image.pullPolicy | default .Values.imagePullPolicy}}
livenessProbe:
httpGet:
path: /live
port: admin-http
{{- with (.Values.policyController.livenessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
name: policy
ports:
- containerPort: 8090
name: grpc
- containerPort: 9990
name: admin-http
- containerPort: 9443
name: policy-https
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: admin-http
initialDelaySeconds: 10
{{- with (.Values.policyController.readinessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
{{- if .Values.policyController.resources }}
{{- include "partials.resources" .Values.policyController.resources | nindent 8 }}
{{- end }}
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.controllerUID}}
{{- if ge (int .Values.controllerGID) 0 }}
runAsGroup: {{.Values.controllerGID}}
{{- end }}
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /var/run/linkerd/tls
name: policy-tls
readOnly: true
initContainers:
{{ if .Values.cniEnabled -}}
- {{- include "partials.network-validator" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ else -}}
{{- /*
The destination controller needs to connect to the Kubernetes API before the proxy is able
to proxy requests, so we always skip these connections.
*/}}
{{- $_ := set $tree.Values.proxyInit "ignoreOutboundPorts" .Values.proxyInit.kubeAPIServerPorts -}}
- {{- include "partials.proxy-init" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{- if $tree.Values.proxy.nativeSidecar }}
{{- $_ := set $tree.Values.proxy "startupProbeInitialDelaySeconds" 35 }}
{{- $_ := set $tree.Values.proxy "startupProbePeriodSeconds" 5 }}
{{- $_ := set $tree.Values.proxy "startupProbeFailureThreshold" 20 }}
- {{- include "partials.proxy" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{- if .Values.priorityClassName -}}
priorityClassName: {{ .Values.priorityClassName }}
{{ end -}}
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: linkerd-destination
volumes:
- name: sp-tls
secret:
secretName: linkerd-sp-validator-k8s-tls
- name: policy-tls
secret:
secretName: linkerd-policy-validator-k8s-tls
{{ if not .Values.cniEnabled -}}
- {{- include "partials.proxyInit.volumes.xtables" . | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{if .Values.identity.serviceAccountTokenProjection -}}
- {{- include "partials.proxy.volumes.service-account-token" . | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
- {{- include "partials.proxy.volumes.identity" . | indent 8 | trimPrefix (repeat 7 " ") }}

View File

@ -0,0 +1,78 @@
{{ if not .Values.disableHeartBeat -}}
---
###
### Heartbeat RBAC
###
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: linkerd-heartbeat
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
resourceNames: ["linkerd-config"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: linkerd-heartbeat
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
kind: Role
name: linkerd-heartbeat
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: linkerd-heartbeat
namespace: {{.Release.Namespace}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: linkerd-heartbeat
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["list"]
- apiGroups: ["linkerd.io"]
resources: ["serviceprofiles"]
verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: linkerd-heartbeat
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
kind: ClusterRole
name: linkerd-heartbeat
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: linkerd-heartbeat
namespace: {{.Release.Namespace}}
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-heartbeat
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: heartbeat
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- include "partials.image-pull-secrets" .Values.imagePullSecrets }}
{{- end }}

View File

@ -0,0 +1,94 @@
{{ if not .Values.disableHeartBeat -}}
---
###
### Heartbeat
###
apiVersion: batch/v1
kind: CronJob
metadata:
name: linkerd-heartbeat
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: heartbeat
app.kubernetes.io/part-of: Linkerd
app.kubernetes.io/version: {{.Values.linkerdVersion}}
linkerd.io/control-plane-component: heartbeat
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
concurrencyPolicy: Replace
{{ if .Values.heartbeatSchedule -}}
schedule: "{{.Values.heartbeatSchedule}}"
{{ else -}}
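{{- /* "04 15 * * *" is a Go time layout ("04" = minutes, "15" = hours), so the default schedule fires once a day, roughly ten minutes after install time, in UTC. */}}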
schedule: "{{ dateInZone "04 15 * * *" (now | mustDateModify "+10m") "UTC"}}"
{{ end -}}
successfulJobsHistoryLimit: 0
jobTemplate:
spec:
template:
metadata:
labels:
linkerd.io/control-plane-component: heartbeat
linkerd.io/workload-ns: {{.Release.Namespace}}
{{- with .Values.podLabels }}{{ toYaml . | trim | nindent 12 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
{{- with .Values.podAnnotations }}{{ toYaml . | trim | nindent 12 }}{{- end }}
spec:
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end -}}
{{- with .Values.runtimeClassName }}
runtimeClassName: {{ . | quote }}
{{- end }}
{{- if .Values.tolerations -}}
{{- include "linkerd.tolerations" . | nindent 10 }}
{{- end -}}
{{- include "linkerd.node-selector" . | nindent 10 }}
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: linkerd-heartbeat
restartPolicy: Never
containers:
- name: heartbeat
image: {{.Values.controllerImage}}:{{.Values.controllerImageVersion | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.imagePullPolicy}}
env:
- name: LINKERD_DISABLED
value: "the heartbeat controller does not use the proxy"
{{- with (.Values.heartbeat).additionalEnv }}
{{- toYaml . | nindent 12 -}}
{{- end }}
{{- with (.Values.heartbeat).experimentalEnv }}
{{- toYaml . | nindent 12 -}}
{{- end }}
args:
- "heartbeat"
- "-controller-namespace={{.Release.Namespace}}"
- "-log-level={{.Values.controllerLogLevel}}"
- "-log-format={{.Values.controllerLogFormat}}"
{{- if .Values.prometheusUrl }}
- "-prometheus-url={{.Values.prometheusUrl}}"
{{- else }}
- "-prometheus-url=http://prometheus.linkerd-viz.svc.{{.Values.clusterDomain}}:9090"
{{- end }}
{{- if .Values.heartbeatResources -}}
{{- include "partials.resources" .Values.heartbeatResources | nindent 12 }}
{{- end }}
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.controllerUID}}
{{- if ge (int .Values.controllerGID) 0 }}
runAsGroup: {{.Values.controllerGID}}
{{- end }}
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
{{- end }}

View File

@ -0,0 +1,49 @@
---
###
### Identity Controller Service RBAC
###
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-{{.Release.Namespace}}-identity
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: ["authentication.k8s.io"]
resources: ["tokenreviews"]
verbs: ["create"]
# TODO(ver) Restrict this to the Linkerd namespace. See
# https://github.com/linkerd/linkerd2/issues/9367
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-{{.Release.Namespace}}-identity
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: linkerd-{{.Release.Namespace}}-identity
subjects:
- kind: ServiceAccount
name: linkerd-identity
namespace: {{.Release.Namespace}}
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-identity
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- include "partials.image-pull-secrets" .Values.imagePullSecrets }}

View File

@ -0,0 +1,273 @@
{{if .Values.identity -}}
---
###
### Identity Controller Service
###
{{ if and (.Values.identity.issuer) (eq .Values.identity.issuer.scheme "linkerd.io/tls") -}}
---
kind: Secret
apiVersion: v1
metadata:
name: linkerd-identity-issuer
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
data:
crt.pem: {{b64enc (required "Please provide the identity issuer certificate" .Values.identity.issuer.tls.crtPEM | trim)}}
key.pem: {{b64enc (required "Please provide the identity issuer private key" .Values.identity.issuer.tls.keyPEM | trim)}}
{{- end}}
{{ if not (.Values.identity.externalCA) -}}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: linkerd-identity-trust-roots
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
data:
ca-bundle.crt: |-{{.Values.identityTrustAnchorsPEM | trim | nindent 4}}
{{- end}}
---
kind: Service
apiVersion: v1
metadata:
name: linkerd-identity
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: identity
ports:
- name: grpc
port: 8080
targetPort: 8080
---
kind: Service
apiVersion: v1
metadata:
name: linkerd-identity-headless
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
clusterIP: None
selector:
linkerd.io/control-plane-component: identity
ports:
- name: grpc
port: 8080
targetPort: 8080
{{- if .Values.enablePodDisruptionBudget }}
---
kind: PodDisruptionBudget
apiVersion: policy/v1
metadata:
name: linkerd-identity
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
selector:
matchLabels:
linkerd.io/control-plane-component: identity
{{- end }}
---
{{- $tree := deepCopy . }}
{{ $_ := set $tree.Values.proxy "workloadKind" "deployment" -}}
{{ $_ := set $tree.Values.proxy "component" "linkerd-identity" -}}
{{ $_ := set $tree.Values.proxy "waitBeforeExitSeconds" 0 -}}
{{- if not (empty .Values.identityProxyResources) }}
{{- $c := dig "cores" .Values.proxy.cores .Values.identityProxyResources }}
{{- $_ := set $tree.Values.proxy "cores" $c }}
{{- $r := merge .Values.identityProxyResources .Values.proxy.resources }}
{{- $_ := set $tree.Values.proxy "resources" $r }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
app.kubernetes.io/name: identity
app.kubernetes.io/part-of: Linkerd
app.kubernetes.io/version: {{.Values.linkerdVersion}}
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
name: linkerd-identity
namespace: {{ .Release.Namespace }}
spec:
replicas: {{.Values.controllerReplicas}}
revisionHistoryLimit: {{.Values.revisionHistoryLimit}}
selector:
matchLabels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- include "partials.proxy.labels" $tree.Values.proxy | nindent 6}}
{{- if .Values.deploymentStrategy }}
strategy:
{{- with .Values.deploymentStrategy }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- end }}
template:
metadata:
annotations:
{{ include "partials.annotations.created-by" . }}
{{- include "partials.proxy.annotations" . | nindent 8}}
{{- with .Values.podAnnotations }}{{ toYaml . | trim | nindent 8 }}{{- end }}
config.linkerd.io/default-inbound-policy: "all-unauthenticated"
labels:
linkerd.io/control-plane-component: identity
linkerd.io/control-plane-ns: {{.Release.Namespace}}
linkerd.io/workload-ns: {{.Release.Namespace}}
{{- include "partials.proxy.labels" $tree.Values.proxy | nindent 8}}
{{- with .Values.podLabels }}{{ toYaml . | trim | nindent 8 }}{{- end }}
spec:
{{- with .Values.runtimeClassName }}
runtimeClassName: {{ . | quote }}
{{- end }}
{{- if .Values.tolerations -}}
{{- include "linkerd.tolerations" . | nindent 6 }}
{{- end -}}
{{- include "linkerd.node-selector" . | nindent 6 }}
{{- $_ := set $tree "component" "identity" -}}
{{- include "linkerd.affinity" $tree | nindent 6 }}
containers:
- args:
- identity
- -log-level={{.Values.controllerLogLevel}}
- -log-format={{.Values.controllerLogFormat}}
- -controller-namespace={{.Release.Namespace}}
- -identity-trust-domain={{.Values.identityTrustDomain | default .Values.clusterDomain}}
- -identity-issuance-lifetime={{.Values.identity.issuer.issuanceLifetime}}
- -identity-clock-skew-allowance={{.Values.identity.issuer.clockSkewAllowance}}
- -identity-scheme={{.Values.identity.issuer.scheme}}
- -enable-pprof={{.Values.enablePprof | default false}}
- -kube-apiclient-qps={{.Values.identity.kubeAPI.clientQPS}}
- -kube-apiclient-burst={{.Values.identity.kubeAPI.clientBurst}}
{{- include "partials.linkerd.trace" . | nindent 8 -}}
env:
- name: LINKERD_DISABLED
value: "linkerd-await cannot block the identity controller"
{{- with (.Values.identity).additionalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
{{- with (.Values.identity).experimentalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
image: {{.Values.controllerImage}}:{{.Values.controllerImageVersion | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.imagePullPolicy}}
livenessProbe:
httpGet:
path: /ping
port: 9990
initialDelaySeconds: 10
{{- with (.Values.identity.livenessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
name: identity
ports:
- containerPort: 8080
name: grpc
- containerPort: 9990
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9990
{{- with (.Values.identity.readinessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
{{- if .Values.identityResources -}}
{{- include "partials.resources" .Values.identityResources | nindent 8 }}
{{- end }}
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.controllerUID}}
{{- if ge (int .Values.controllerGID) 0 }}
runAsGroup: {{.Values.controllerGID}}
{{- end }}
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /var/run/linkerd/identity/issuer
name: identity-issuer
- mountPath: /var/run/linkerd/identity/trust-roots/
name: trust-roots
{{- $_ := set $tree.Values.proxy "await" false }}
{{- $_ := set $tree.Values.proxy "loadTrustBundleFromConfigMap" true }}
{{- $_ := set $tree.Values.proxy "podInboundPorts" "8080,9990" }}
{{- $_ := set $tree.Values.proxy "nativeSidecar" false }}
{{- /*
The identity controller cannot discover policies, so we configure it with defaults that
enforce TLS on the identity service.
*/}}
{{- $_ := set $tree.Values.proxy "defaultInboundPolicy" "all-unauthenticated" }}
{{- $_ := set $tree.Values.proxy "requireTLSOnInboundPorts" "8080" }}
{{- $_ := set $tree.Values.proxy "capabilities" (dict "drop" (list "ALL")) }}
{{- $_ := set $tree.Values.proxy "outboundDiscoveryCacheUnusedTimeout" "5s" }}
{{- $_ := set $tree.Values.proxy "inboundDiscoveryCacheUnusedTimeout" "90s" }}
- {{- include "partials.proxy" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
initContainers:
{{ if .Values.cniEnabled -}}
- {{- include "partials.network-validator" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ else -}}
{{- /*
The identity controller needs to connect to the Kubernetes API before the proxy is able to
proxy requests, so we always skip these connections. The identity controller makes no other
outbound connections (so it's not important to persist any other skip ports here)
*/}}
{{- $_ := set $tree.Values.proxyInit "ignoreOutboundPorts" .Values.proxyInit.kubeAPIServerPorts -}}
- {{- include "partials.proxy-init" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{- if .Values.priorityClassName -}}
priorityClassName: {{ .Values.priorityClassName }}
{{ end -}}
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: linkerd-identity
volumes:
- name: identity-issuer
secret:
secretName: linkerd-identity-issuer
- configMap:
name: linkerd-identity-trust-roots
name: trust-roots
{{ if not .Values.cniEnabled -}}
- {{- include "partials.proxyInit.volumes.xtables" . | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{if .Values.identity.serviceAccountTokenProjection -}}
- {{- include "partials.proxy.volumes.service-account-token" . | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
- {{- include "partials.proxy.volumes.identity" . | indent 8 | trimPrefix (repeat 7 " ") }}
{{end -}}
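
For reference, the identity container's flags above map one-to-one onto chart values. A minimal sketch of an override that feeds them (field names taken from this chart's values.yaml; the concrete lifetimes are illustrative, not recommendations):

```yaml
# Illustrative overrides for the identity controller flags rendered above.
identityTrustDomain: cluster.local    # -identity-trust-domain (defaults to clusterDomain when empty)
identity:
  issuer:
    scheme: linkerd.io/tls            # -identity-scheme
    issuanceLifetime: 48h0m0s         # -identity-issuance-lifetime
    clockSkewAllowance: 20s           # -identity-clock-skew-allowance
  kubeAPI:
    clientQPS: 100                    # -kube-apiclient-qps
    clientBurst: 200                  # -kube-apiclient-burst
```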

@ -0,0 +1,18 @@
{{- if eq .Release.Service "CLI" -}}
---
###
### Linkerd Namespace
###
kind: Namespace
apiVersion: v1
metadata:
name: {{ .Release.Namespace }}
annotations:
linkerd.io/inject: disabled
labels:
linkerd.io/is-control-plane: "true"
config.linkerd.io/admission-webhooks: disabled
linkerd.io/control-plane-ns: {{.Release.Namespace}}
    {{- /* linkerd-init requires extended capabilities and so requires privileged mode */}}
pod-security.kubernetes.io/enforce: {{ ternary "restricted" "privileged" .Values.cniEnabled }}
{{ end -}}

@ -0,0 +1,128 @@
{{- $podMonitor := .Values.podMonitor -}}
{{- if and $podMonitor.enabled $podMonitor.controller.enabled }}
---
###
### Prometheus Operator PodMonitor for Linkerd control-plane
###
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: "linkerd-controller"
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-ns: {{ .Release.Namespace }}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- with .Values.podMonitor.labels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
namespaceSelector: {{ tpl .Values.podMonitor.controller.namespaceSelector . | nindent 4 }}
selector:
matchLabels: {}
podMetricsEndpoints:
- interval: {{ $podMonitor.scrapeInterval }}
scrapeTimeout: {{ $podMonitor.scrapeTimeout }}
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_container_port_name
action: keep
regex: admin-http
- sourceLabels:
- __meta_kubernetes_pod_container_name
action: replace
targetLabel: component
{{- end }}
{{- if and $podMonitor.enabled $podMonitor.serviceMirror.enabled }}
---
###
### Prometheus Operator PodMonitor for Linkerd Service Mirror (multi-cluster)
###
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: "linkerd-service-mirror"
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-ns: {{ .Release.Namespace }}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- with .Values.podMonitor.labels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
namespaceSelector:
any: true
selector:
matchLabels: {}
podMetricsEndpoints:
- interval: {{ $podMonitor.scrapeInterval }}
scrapeTimeout: {{ $podMonitor.scrapeTimeout }}
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_label_linkerd_io_control_plane_component
- __meta_kubernetes_pod_container_port_name
action: keep
regex: linkerd-service-mirror;admin-http$
- sourceLabels:
- __meta_kubernetes_pod_container_name
action: replace
targetLabel: component
{{- end }}
{{- if and $podMonitor.enabled $podMonitor.proxy.enabled }}
---
###
### Prometheus Operator PodMonitor for Linkerd data-plane
###
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: "linkerd-proxy"
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-ns: {{ .Release.Namespace }}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- with .Values.podMonitor.labels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
namespaceSelector:
any: true
selector:
matchLabels: {}
podMetricsEndpoints:
- interval: {{ $podMonitor.scrapeInterval }}
scrapeTimeout: {{ $podMonitor.scrapeTimeout }}
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_container_name
- __meta_kubernetes_pod_container_port_name
- __meta_kubernetes_pod_label_linkerd_io_control_plane_ns
action: keep
regex: ^linkerd-proxy;linkerd-admin;{{ .Release.Namespace }}$
- sourceLabels: [ __meta_kubernetes_namespace ]
action: replace
targetLabel: namespace
- sourceLabels: [ __meta_kubernetes_pod_name ]
action: replace
targetLabel: pod
- sourceLabels: [ __meta_kubernetes_pod_label_linkerd_io_proxy_job ]
action: replace
targetLabel: k8s_job
- action: labeldrop
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
- action: labeldrop
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+)
- action: labelmap
regex: __meta_kubernetes_pod_label_linkerd_io_(.+)
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
replacement: __tmp_pod_label_$1
- action: labelmap
regex: __tmp_pod_label_linkerd_io_(.+)
replacement: __tmp_pod_label_$1
- action: labeldrop
regex: __tmp_pod_label_linkerd_io_(.+)
- action: labelmap
regex: __tmp_pod_label_(.+)
{{- end }}
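
All three PodMonitors above are gated on `podMonitor.enabled` plus a per-component flag, and they assume the Prometheus Operator CRDs are already installed in the cluster. A sketch of values that turns them on (field names from this chart's values.yaml; the `release: prometheus` label is only an example selector):

```yaml
podMonitor:
  enabled: true            # master switch for the templates above
  scrapeInterval: 10s
  scrapeTimeout: 10s
  labels:
    release: prometheus    # example label; match it to your Prometheus instance's podMonitorSelector
  controller:
    enabled: true          # control-plane PodMonitor
  serviceMirror:
    enabled: true          # multi-cluster service mirror PodMonitor
  proxy:
    enabled: true          # data-plane PodMonitor
```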

@ -0,0 +1,120 @@
---
###
### Proxy Injector RBAC
###
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-{{.Release.Namespace}}-proxy-injector
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: [""]
resources: ["namespaces", "replicationcontrollers"]
verbs: ["list", "get", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch"]
- apiGroups: ["extensions", "apps"]
resources: ["deployments", "replicasets", "daemonsets", "statefulsets"]
verbs: ["list", "get", "watch"]
- apiGroups: ["extensions", "batch"]
resources: ["cronjobs", "jobs"]
verbs: ["list", "get", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: linkerd-{{.Release.Namespace}}-proxy-injector
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
subjects:
- kind: ServiceAccount
name: linkerd-proxy-injector
namespace: {{.Release.Namespace}}
apiGroup: ""
roleRef:
kind: ClusterRole
name: linkerd-{{.Release.Namespace}}-proxy-injector
apiGroup: rbac.authorization.k8s.io
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: linkerd-proxy-injector
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- include "partials.image-pull-secrets" .Values.imagePullSecrets }}
---
{{- $host := printf "linkerd-proxy-injector.%s.svc" .Release.Namespace }}
{{- $ca := genSelfSignedCert $host (list) (list $host) 365 }}
{{- if (not .Values.proxyInjector.externalSecret) }}
kind: Secret
apiVersion: v1
metadata:
name: linkerd-proxy-injector-k8s-tls
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
type: kubernetes.io/tls
data:
tls.crt: {{ ternary (b64enc (trim $ca.Cert)) (b64enc (trim .Values.proxyInjector.crtPEM)) (empty .Values.proxyInjector.crtPEM) }}
tls.key: {{ ternary (b64enc (trim $ca.Key)) (b64enc (trim .Values.proxyInjector.keyPEM)) (empty .Values.proxyInjector.keyPEM) }}
---
{{- end }}
{{- include "linkerd.webhook.validation" .Values.proxyInjector }}
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: linkerd-proxy-injector-webhook-config
{{- if or (.Values.proxyInjector.injectCaFrom) (.Values.proxyInjector.injectCaFromSecret) }}
annotations:
{{- if .Values.proxyInjector.injectCaFrom }}
cert-manager.io/inject-ca-from: {{ .Values.proxyInjector.injectCaFrom }}
{{- end }}
{{- if .Values.proxyInjector.injectCaFromSecret }}
cert-manager.io/inject-ca-from-secret: {{ .Values.proxyInjector.injectCaFromSecret }}
{{- end }}
{{- end }}
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
webhooks:
- name: linkerd-proxy-injector.linkerd.io
namespaceSelector:
{{- toYaml .Values.proxyInjector.namespaceSelector | trim | nindent 4 }}
objectSelector:
{{- toYaml .Values.proxyInjector.objectSelector | trim | nindent 4 }}
clientConfig:
service:
name: linkerd-proxy-injector
namespace: {{ .Release.Namespace }}
path: "/"
{{- if and (empty .Values.proxyInjector.injectCaFrom) (empty .Values.proxyInjector.injectCaFromSecret) }}
caBundle: {{ ternary (b64enc (trim $ca.Cert)) (b64enc (trim .Values.proxyInjector.caBundle)) (empty .Values.proxyInjector.caBundle) }}
{{- end }}
failurePolicy: {{.Values.webhookFailurePolicy}}
admissionReviewVersions: ["v1", "v1beta1"]
rules:
- operations: [ "CREATE" ]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods", "services"]
scope: "Namespaced"
sideEffects: None
timeoutSeconds: {{ .Values.proxyInjector.timeoutSeconds | default 10 }}
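
When `proxyInjector.externalSecret` is false, the chart self-signs the webhook certificate via `genSelfSignedCert` as above. A hedged sketch of handing that over to cert-manager instead; the Certificate reference `linkerd/linkerd-proxy-injector` is an assumption, and it must produce the `linkerd-proxy-injector-k8s-tls` Secret that this chart's proxy-injector deployment mounts:

```yaml
proxyInjector:
  externalSecret: true                            # skip the Helm-generated Secret above
  injectCaFrom: linkerd/linkerd-proxy-injector    # assumed cert-manager Certificate (namespace/name);
                                                  # adds the cert-manager.io/inject-ca-from annotation
```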

@ -0,0 +1,222 @@
---
###
### Proxy Injector
###
{{- $tree := deepCopy . }}
{{ $_ := set $tree.Values.proxy "workloadKind" "deployment" -}}
{{ $_ := set $tree.Values.proxy "component" "linkerd-proxy-injector" -}}
{{ $_ := set $tree.Values.proxy "waitBeforeExitSeconds" 0 -}}
{{- if not (empty .Values.proxyInjectorProxyResources) }}
{{- $c := dig "cores" .Values.proxy.cores .Values.proxyInjectorProxyResources }}
{{- $_ := set $tree.Values.proxy "cores" $c }}
{{- $r := merge .Values.proxyInjectorProxyResources .Values.proxy.resources }}
{{- $_ := set $tree.Values.proxy "resources" $r }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
app.kubernetes.io/name: proxy-injector
app.kubernetes.io/part-of: Linkerd
app.kubernetes.io/version: {{.Values.linkerdVersion}}
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
name: linkerd-proxy-injector
namespace: {{ .Release.Namespace }}
spec:
replicas: {{.Values.controllerReplicas}}
revisionHistoryLimit: {{.Values.revisionHistoryLimit}}
selector:
matchLabels:
linkerd.io/control-plane-component: proxy-injector
{{- if .Values.deploymentStrategy }}
strategy:
{{- with .Values.deploymentStrategy }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- end }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/proxy-injector-rbac.yaml") . | sha256sum }}
{{ include "partials.annotations.created-by" . }}
{{- include "partials.proxy.annotations" . | nindent 8}}
{{- with .Values.podAnnotations }}{{ toYaml . | trim | nindent 8 }}{{- end }}
config.linkerd.io/opaque-ports: "8443"
config.linkerd.io/default-inbound-policy: "all-unauthenticated"
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
linkerd.io/workload-ns: {{.Release.Namespace}}
{{- include "partials.proxy.labels" $tree.Values.proxy | nindent 8}}
{{- with .Values.podLabels }}{{ toYaml . | trim | nindent 8 }}{{- end }}
spec:
{{- with .Values.runtimeClassName }}
runtimeClassName: {{ . | quote }}
{{- end }}
{{- if .Values.tolerations -}}
{{- include "linkerd.tolerations" . | nindent 6 }}
{{- end -}}
{{- include "linkerd.node-selector" . | nindent 6 }}
{{- $_ := set $tree "component" "proxy-injector" -}}
{{- include "linkerd.affinity" $tree | nindent 6 }}
containers:
{{- $_ := set $tree.Values.proxy "await" $tree.Values.proxy.await }}
{{- $_ := set $tree.Values.proxy "loadTrustBundleFromConfigMap" true }}
{{- $_ := set $tree.Values.proxy "podInboundPorts" "8443,9995" }}
{{- /*
The pod needs to accept webhook traffic, and we can't rely on that originating in the
cluster network.
*/}}
{{- $_ := set $tree.Values.proxy "defaultInboundPolicy" "all-unauthenticated" }}
{{- $_ := set $tree.Values.proxy "capabilities" (dict "drop" (list "ALL")) }}
{{- $_ := set $tree.Values.proxy "outboundDiscoveryCacheUnusedTimeout" "5s" }}
{{- $_ := set $tree.Values.proxy "inboundDiscoveryCacheUnusedTimeout" "90s" }}
{{- if not $tree.Values.proxy.nativeSidecar }}
- {{- include "partials.proxy" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{- end }}
- args:
- proxy-injector
- -log-level={{.Values.controllerLogLevel}}
- -log-format={{.Values.controllerLogFormat}}
- -linkerd-namespace={{.Release.Namespace}}
- -enable-pprof={{.Values.enablePprof | default false}}
{{- if or (.Values.proxyInjector).additionalEnv (.Values.proxyInjector).experimentalEnv }}
env:
{{- with (.Values.proxyInjector).additionalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
{{- with (.Values.proxyInjector).experimentalEnv }}
{{- toYaml . | nindent 8 -}}
{{- end }}
{{- end }}
image: {{.Values.controllerImage}}:{{.Values.controllerImageVersion | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.imagePullPolicy}}
livenessProbe:
httpGet:
path: /ping
port: 9995
initialDelaySeconds: 10
{{- with (.Values.proxyInjector.livenessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
name: proxy-injector
ports:
- containerPort: 8443
name: proxy-injector
- containerPort: 9995
name: admin-http
readinessProbe:
failureThreshold: 7
httpGet:
path: /ready
port: 9995
{{- with (.Values.proxyInjector.readinessProbe).timeoutSeconds }}
timeoutSeconds: {{ . }}
{{- end }}
{{- if .Values.proxyInjectorResources -}}
{{- include "partials.resources" .Values.proxyInjectorResources | nindent 8 }}
{{- end }}
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.controllerUID}}
{{- if ge (int .Values.controllerGID) 0 }}
runAsGroup: {{.Values.controllerGID}}
{{- end }}
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /var/run/linkerd/config
name: config
- mountPath: /var/run/linkerd/identity/trust-roots
name: trust-roots
- mountPath: /var/run/linkerd/tls
name: tls
readOnly: true
initContainers:
{{ if .Values.cniEnabled -}}
- {{- include "partials.network-validator" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ else -}}
{{- /*
The controller needs to connect to the Kubernetes API. There's no reason
to put the proxy in the way of that.
*/}}
{{- $_ := set $tree.Values.proxyInit "ignoreOutboundPorts" .Values.proxyInit.kubeAPIServerPorts -}}
- {{- include "partials.proxy-init" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{- if $tree.Values.proxy.nativeSidecar }}
{{- $_ := set $tree.Values.proxy "startupProbeInitialDelaySeconds" 35 }}
{{- $_ := set $tree.Values.proxy "startupProbePeriodSeconds" 5 }}
{{- $_ := set $tree.Values.proxy "startupProbeFailureThreshold" 20 }}
- {{- include "partials.proxy" $tree | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{- if .Values.priorityClassName -}}
priorityClassName: {{ .Values.priorityClassName }}
{{ end -}}
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: linkerd-proxy-injector
volumes:
- configMap:
name: linkerd-config
name: config
- configMap:
name: linkerd-identity-trust-roots
name: trust-roots
- name: tls
secret:
secretName: linkerd-proxy-injector-k8s-tls
{{ if not .Values.cniEnabled -}}
- {{- include "partials.proxyInit.volumes.xtables" . | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
{{if .Values.identity.serviceAccountTokenProjection -}}
- {{- include "partials.proxy.volumes.service-account-token" . | indent 8 | trimPrefix (repeat 7 " ") }}
{{ end -}}
- {{- include "partials.proxy.volumes.identity" . | indent 8 | trimPrefix (repeat 7 " ") }}
---
kind: Service
apiVersion: v1
metadata:
name: linkerd-proxy-injector
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
config.linkerd.io/opaque-ports: "443"
spec:
type: ClusterIP
selector:
linkerd.io/control-plane-component: proxy-injector
ports:
- name: proxy-injector
port: 443
targetPort: proxy-injector
{{- if .Values.enablePodDisruptionBudget }}
---
kind: PodDisruptionBudget
apiVersion: policy/v1
metadata:
name: linkerd-proxy-injector
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-component: proxy-injector
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
annotations:
{{ include "partials.annotations.created-by" . }}
spec:
maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
selector:
matchLabels:
linkerd.io/control-plane-component: proxy-injector
{{- end }}
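
The PodDisruptionBudget at the end of this file only renders when `enablePodDisruptionBudget` is set. A sketch of the values involved (the replica count is illustrative; `maxUnavailable: 1` matches values-ha.yaml):

```yaml
enablePodDisruptionBudget: true   # render the PDB above
controllerReplicas: 3             # a maxUnavailable: 1 budget is only useful with multiple replicas
controller:
  podDisruptionBudget:
    maxUnavailable: 1
```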

@ -0,0 +1,119 @@
{{ if .Values.enablePSP -}}
---
###
### Control Plane PSP
###
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: linkerd-{{.Release.Namespace}}-control-plane
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: "runtime/default"
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
spec:
{{- if or .Values.proxyInit.closeWaitTimeoutSecs .Values.proxyInit.runAsRoot }}
allowPrivilegeEscalation: true
{{- else }}
allowPrivilegeEscalation: false
{{- end }}
readOnlyRootFilesystem: true
{{- if empty .Values.cniEnabled }}
allowedCapabilities:
- NET_ADMIN
- NET_RAW
{{- end}}
requiredDropCapabilities:
- ALL
hostNetwork: false
hostIPC: false
hostPID: false
seLinux:
rule: RunAsAny
runAsUser:
{{- if .Values.cniEnabled }}
rule: MustRunAsNonRoot
{{- else }}
rule: RunAsAny
{{- end }}
runAsGroup:
{{- if .Values.cniEnabled }}
rule: MustRunAs
ranges:
- min: 1000
max: 999999
{{- else }}
rule: RunAsAny
{{- end }}
supplementalGroups:
rule: MustRunAs
ranges:
{{- if .Values.cniEnabled }}
- min: 10001
max: 65535
{{- else }}
- min: 1
max: 65535
{{- end }}
fsGroup:
rule: MustRunAs
ranges:
{{- if .Values.cniEnabled }}
- min: 10001
max: 65535
{{- else }}
- min: 1
max: 65535
{{- end }}
volumes:
- configMap
- emptyDir
- secret
- projected
- downwardAPI
- persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: linkerd-psp
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: ['policy', 'extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- linkerd-{{.Release.Namespace}}-control-plane
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: linkerd-psp
namespace: {{ .Release.Namespace }}
labels:
linkerd.io/control-plane-ns: {{.Release.Namespace}}
{{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
kind: Role
name: linkerd-psp
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: linkerd-destination
namespace: {{.Release.Namespace}}
{{ if not .Values.disableHeartBeat -}}
- kind: ServiceAccount
name: linkerd-heartbeat
namespace: {{.Release.Namespace}}
{{ end -}}
- kind: ServiceAccount
name: linkerd-identity
namespace: {{.Release.Namespace}}
- kind: ServiceAccount
name: linkerd-proxy-injector
namespace: {{.Release.Namespace}}
{{ end -}}
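
This whole file is wrapped in `{{ if .Values.enablePSP }}` and stays off by default; PodSecurityPolicy was deprecated in Kubernetes 1.21 and removed in 1.25, so enabling it only makes sense on older clusters:

```yaml
# Only meaningful on clusters that still serve policy/v1beta1 PodSecurityPolicy (pre-1.25).
enablePSP: true
```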

@ -0,0 +1,63 @@
# This values.yaml file contains the values needed to enable HA mode.
# Usage:
# helm install -f values-ha.yaml
# -- Create PodDisruptionBudget resources for each control plane workload
enablePodDisruptionBudget: true
controller:
# -- sets pod disruption budget parameter for all deployments
podDisruptionBudget:
# -- Maximum number of pods that can be unavailable during disruption
maxUnavailable: 1
# -- Specify a deployment strategy for each control plane workload
deploymentStrategy:
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
# -- add PodAntiAffinity to each control plane workload
enablePodAntiAffinity: true
# nodeAffinity:
# proxy configuration
proxy:
resources:
cpu:
request: 100m
memory:
limit: 250Mi
request: 20Mi
# controller configuration
controllerReplicas: 3
controllerResources: &controller_resources
cpu: &controller_resources_cpu
limit: ""
request: 100m
memory:
limit: 250Mi
request: 50Mi
destinationResources: *controller_resources
# identity configuration
identityResources:
cpu: *controller_resources_cpu
memory:
limit: 250Mi
request: 10Mi
# heartbeat configuration
heartbeatResources: *controller_resources
# proxy injector configuration
proxyInjectorResources: *controller_resources
webhookFailurePolicy: Fail
# service profile validator configuration
spValidatorResources: *controller_resources
# flag for linkerd check
highAvailability: true
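
The `&controller_resources` / `*controller_resources` YAML anchors above are plain aliasing; for example, after expansion the identity controller's resources resolve to the block below (shown for reference, not a separate setting to copy):

```yaml
identityResources:
  cpu:
    limit: ""       # from the &controller_resources_cpu anchor
    request: 100m
  memory:
    limit: 250Mi    # overridden next to the alias above
    request: 10Mi
```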

@ -0,0 +1,664 @@
# Default values for linkerd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Kubernetes DNS Domain name to use
clusterDomain: cluster.local
# -- The cluster networks for which service discovery is performed. This should
# include the pod and service networks, but need not include the node network.
#
# By default, all IPv4 private networks and all accepted IPv6 ULAs are
# specified so that resolution works in typical Kubernetes environments.
clusterNetworks: "10.0.0.0/8,100.64.0.0/10,172.16.0.0/12,192.168.0.0/16,fd00::/8"
# -- Docker image pull policy
imagePullPolicy: IfNotPresent
# -- Specifies the number of old ReplicaSets to retain to allow rollback.
revisionHistoryLimit: 10
# -- Log level for the control plane components
controllerLogLevel: info
# -- Log format for the control plane components
controllerLogFormat: plain
# -- enables control plane tracing
controlPlaneTracing: false
# -- namespace to send control plane traces to
controlPlaneTracingNamespace: linkerd-jaeger
# -- control plane version. See Proxy section for proxy version
linkerdVersion: edge-24.9.1
# -- default kubernetes deployment strategy
deploymentStrategy:
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
# -- enables the use of EndpointSlice informers for the destination service;
# enableEndpointSlices should be set to true only if EndpointSlice K8s feature
# gate is on
enableEndpointSlices: true
# -- enables pod anti affinity creation on deployments for high availability
enablePodAntiAffinity: false
# -- enables the use of pprof endpoints on control plane component's admin
# servers
enablePprof: false
# -- enables the creation of pod disruption budgets for control plane components
enablePodDisruptionBudget: false
# -- disables routing IPv6 traffic in addition to IPv4 traffic through the
# proxy (IPv6 routing only available as of proxy-init v2.3.0 and linkerd-cni
# v1.4.0)
disableIPv6: true
controller:
# -- sets pod disruption budget parameter for all deployments
podDisruptionBudget:
# -- Maximum number of pods that can be unavailable during disruption
maxUnavailable: 1
# -- enabling this omits the NET_ADMIN capability in the PSP
# and the proxy-init container when injecting the proxy;
# requires the linkerd-cni plugin to already be installed
cniEnabled: false
# -- Trust root certificate (ECDSA). It must be provided during install.
identityTrustAnchorsPEM: |
# -- Trust domain used for identity
# @default -- clusterDomain
identityTrustDomain: ""
kubeAPI: &kubeapi
# -- Maximum QPS sent to the kube-apiserver before throttling.
# See [token bucket rate limiter
# implementation](https://github.com/kubernetes/client-go/blob/v12.0.0/util/flowcontrol/throttle.go)
clientQPS: 100
# -- Burst value over clientQPS
clientBurst: 200
# -- Additional annotations to add to all pods
podAnnotations: {}
# -- Additional labels to add to all pods
podLabels: {}
# -- Labels to apply to all resources
commonLabels: {}
# -- Kubernetes priorityClassName for the Linkerd Pods
priorityClassName: ""
# -- Runtime Class Name for all the pods
runtimeClassName: ""
# policy controller configuration
policyController:
image:
# -- Docker image for the policy controller
name: cr.l5d.io/linkerd/policy-controller
# -- Pull policy for the policy controller container image
# @default -- imagePullPolicy
pullPolicy: ""
# -- Tag for the policy controller container image
# @default -- linkerdVersion
version: ""
# -- Log level for the policy controller
logLevel: info
# -- The networks from which probes are performed.
#
# By default, all networks are allowed so that all probes are authorized.
probeNetworks:
- 0.0.0.0/0
- "::/0"
# -- policy controller resource requests & limits
resources:
cpu:
# -- Maximum amount of CPU units that the policy controller can use
limit: ""
# -- Amount of CPU units that the policy controller requests
request: ""
memory:
# -- Maximum amount of memory that the policy controller can use
limit: ""
      # -- Amount of memory that the policy controller requests
request: ""
ephemeral-storage:
# -- Maximum amount of ephemeral storage that the policy controller can use
limit: ""
# -- Amount of ephemeral storage that the policy controller requests
request: ""
livenessProbe:
timeoutSeconds: 1
readinessProbe:
timeoutSeconds: 1
# proxy configuration
proxy:
# -- Enable service profiles for non-Kubernetes services
enableExternalProfiles: false
# -- Maximum time allowed for the proxy to establish an outbound TCP
# connection
outboundConnectTimeout: 1000ms
# -- Maximum time allowed for the proxy to establish an inbound TCP
# connection
inboundConnectTimeout: 100ms
# -- Maximum time allowed before an unused outbound discovery result
# is evicted from the cache
outboundDiscoveryCacheUnusedTimeout: "5s"
# -- Maximum time allowed before an unused inbound discovery result
# is evicted from the cache
inboundDiscoveryCacheUnusedTimeout: "90s"
# -- When set to true, disables the protocol detection timeout on the
# outbound side of the proxy by setting it to a very high value
disableOutboundProtocolDetectTimeout: false
# -- When set to true, disables the protocol detection timeout on the inbound
# side of the proxy by setting it to a very high value
disableInboundProtocolDetectTimeout: false
image:
# -- Docker image for the proxy
name: cr.l5d.io/linkerd/proxy
# -- Pull policy for the proxy container image
# @default -- imagePullPolicy
pullPolicy: ""
# -- Tag for the proxy container image
# @default -- linkerdVersion
version: ""
# -- Enables the proxy's /shutdown admin endpoint
enableShutdownEndpoint: false
# -- Log level for the proxy
logLevel: warn,linkerd=info,hickory=error
# -- Log format (`plain` or `json`) for the proxy
logFormat: plain
# -- (`off` or `insecure`) If set to `off`, will prevent the proxy from
# logging HTTP headers. If set to `insecure`, HTTP headers may be logged
# verbatim. Note that setting this to `insecure` is not alone sufficient to
# log HTTP headers; the proxy logLevel must also be set to debug.
logHTTPHeaders: "off"
ports:
# -- Admin port for the proxy container
admin: 4191
# -- Control port for the proxy container
control: 4190
# -- Inbound port for the proxy container
inbound: 4143
# -- Outbound port for the proxy container
outbound: 4140
# -- The `cpu.limit` and `cores` should be kept in sync. The value of `cores`
# must be an integer and should typically be set by rounding up from the
# limit. E.g. if cpu.limit is '1500m', cores should be 2.
cores: 0
resources:
cpu:
# -- Maximum amount of CPU units that the proxy can use
limit: ""
# -- Amount of CPU units that the proxy requests
request: ""
memory:
# -- Maximum amount of memory that the proxy can use
limit: ""
      # -- Amount of memory that the proxy requests
request: ""
ephemeral-storage:
# -- Maximum amount of ephemeral storage that the proxy can use
limit: ""
# -- Amount of ephemeral storage that the proxy requests
request: ""
# -- User id under which the proxy runs
uid: 2102
# -- (int) Optional customisation of the group id under which the proxy runs (the group ID will be omitted if lower than 0)
gid: -1
# -- If set the injected proxy sidecars in the data plane will stay alive for
# at least the given period before receiving the SIGTERM signal from
# Kubernetes but no longer than the pod's `terminationGracePeriodSeconds`.
# See [Lifecycle
# hooks](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks)
# for more info on container lifecycle hooks.
waitBeforeExitSeconds: 0
# -- If set, the application container will not start until the proxy is
# ready
await: true
requireIdentityOnInboundPorts: ""
# -- Default set of opaque ports
# - SMTP (25,587) server-first
# - MYSQL (3306) server-first
# - Galera (4444) server-first
# - PostgreSQL (5432) server-first
# - Redis (6379) server-first
# - ElasticSearch (9300) server-first
# - Memcached (11211) clients do not issue any preamble, which breaks detection
opaquePorts: "25,587,3306,4444,5432,6379,9300,11211"
# -- Grace period for graceful proxy shutdowns. If this timeout elapses before all open connections have completed, the proxy will terminate forcefully, closing any remaining connections.
shutdownGracePeriod: ""
# -- The default allow policy to use when no `Server` selects a pod. One of: "all-authenticated",
# "all-unauthenticated", "cluster-authenticated", "cluster-unauthenticated", "deny", "audit"
# @default -- "all-unauthenticated"
defaultInboundPolicy: "all-unauthenticated"
# -- Enable KEP-753 native sidecars
# This is an experimental feature. It requires Kubernetes >= 1.29.
# If enabled, .proxy.waitBeforeExitSeconds should not be used.
nativeSidecar: false
# -- Native sidecar proxy startup probe parameters.
# -- LivenessProbe timeout and delay configuration
livenessProbe:
initialDelaySeconds: 10
timeoutSeconds: 1
# -- ReadinessProbe timeout and delay configuration
readinessProbe:
initialDelaySeconds: 2
timeoutSeconds: 1
startupProbe:
initialDelaySeconds: 0
periodSeconds: 1
failureThreshold: 120
# Configures general properties of the proxy's control plane clients.
control:
# Configures limits on API response streams.
streams:
# -- The timeout for the first update from the control plane.
initialTimeout: "3s"
# -- The timeout between consecutive updates from the control plane.
idleTimeout: "5m"
# -- The maximum duration for a response stream (i.e. before it will be
# reinitialized).
lifetime: "1h"
inbound:
server:
http2:
# -- The interval at which PINGs are issued to remote HTTP/2 clients.
keepAliveInterval: "10s"
# -- The timeout within which keep-alive PINGs must be acknowledged on inbound HTTP/2 connections.
keepAliveTimeout: "3s"
outbound:
server:
http2:
# -- The interval at which PINGs are issued to local application HTTP/2 clients.
keepAliveInterval: "10s"
# -- The timeout within which keep-alive PINGs must be acknowledged on outbound HTTP/2 connections.
keepAliveTimeout: "3s"
# proxy-init configuration
proxyInit:
# -- Variant of iptables that will be used to configure routing. Currently,
# proxy-init can be run either in 'nft' or in 'legacy' mode. The mode will
# control which utility binary will be called. The host must support
# whichever mode will be used
iptablesMode: "legacy"
# -- Default set of inbound ports to skip via iptables
# - Galera (4567,4568)
ignoreInboundPorts: "4567,4568"
# -- Default set of outbound ports to skip via iptables
# - Galera (4567,4568)
ignoreOutboundPorts: "4567,4568"
# -- Default set of ports to skip via iptables for control plane
# components so they can communicate with the Kubernetes API Server
kubeAPIServerPorts: "443,6443"
# -- Comma-separated list of subnets in valid CIDR format that should be skipped by the proxy
skipSubnets: ""
# -- Log level for the proxy-init
# @default -- info
logLevel: ""
# -- Log format (`plain` or `json`) for the proxy-init
# @default -- plain
logFormat: ""
image:
# -- Docker image for the proxy-init container
name: cr.l5d.io/linkerd/proxy-init
# -- Pull policy for the proxy-init container image
# @default -- imagePullPolicy
pullPolicy: ""
# -- Tag for the proxy-init container image
version: v2.4.1
closeWaitTimeoutSecs: 0
# -- Privileged mode allows the container processes to inherit all security
# capabilities and bypass any security limitations enforced by the kubelet.
# When used with 'runAsRoot: true', the container will behave exactly as if
# it was running as root on the host. May escape cgroup limits and see other
# processes and devices on the host.
# @default -- false
privileged: false
# -- Allow overriding the runAsNonRoot behaviour (<https://github.com/linkerd/linkerd2/issues/7308>)
runAsRoot: false
# -- This value is used only if runAsRoot is false; otherwise runAsUser will be 0
runAsUser: 65534
# -- This value is used only if runAsRoot is false; otherwise runAsGroup will be 0
runAsGroup: 65534
xtMountPath:
mountPath: /run
name: linkerd-proxy-init-xtables-lock
# network validator configuration
# This runs on a host that uses iptables to reroute network traffic. The validator
# ensures that iptables is correctly routing requests before we start linkerd.
networkValidator:
# -- Log level for the network-validator
# @default -- debug
logLevel: debug
# -- Log format (`plain` or `json`) for network-validator
# @default -- plain
logFormat: plain
  # -- Address to which the network-validator will attempt to connect. This should be an IP
  # address that the cluster is expected to be able to reach, on a port that it should not be
  # able to reach, e.g. a public IP for public clusters or a private IP for air-gapped clusters,
  # with a port such as 20001.
# If empty, defaults to 1.1.1.1:20001 and [fd00::1]:20001 for IPv4 and IPv6 respectively.
connectAddr: ""
# -- Address to which network-validator listens to requests from itself.
# If empty, defaults to 0.0.0.0:4140 and [::]:4140 for IPv4 and IPv6 respectively.
listenAddr: ""
# -- Timeout before network-validator fails to validate the pod's network connectivity
timeout: "10s"
# -- Include a securityContext in the network-validator pod spec
enableSecurityContext: true
# -- For Private docker registries, authentication is needed.
# Registry secrets are applied to the respective service accounts
imagePullSecrets: []
# - name: my-private-docker-registry-login-secret
# -- Allow proxies to perform transparent HTTP/2 upgrading
enableH2Upgrade: true
# -- Add a PSP resource and bind it to the control plane ServiceAccounts. Note
# PSP has been deprecated since k8s v1.21
enablePSP: false
# -- Failure policy for the proxy injector
webhookFailurePolicy: Ignore
# controllerImage -- Docker image for the destination and identity components
controllerImage: cr.l5d.io/linkerd/controller
# -- Optionally allow a specific container image Tag (or SHA) to be specified for the controllerImage.
controllerImageVersion: ""
# -- Number of replicas for each control plane pod
controllerReplicas: 1
# -- User ID for the control plane components
controllerUID: 2103
# -- (int) Optional customisation of the group ID for the control plane components (the group ID will be omitted if lower than 0)
controllerGID: -1
# destination configuration
# set resources for the sp-validator and its linkerd proxy respectively
# see proxy.resources for details.
# destinationResources -- CPU, Memory and Ephemeral Storage resources required by destination (see `proxy.resources` for sub-fields)
#destinationResources:
# destinationProxyResources -- CPU, Memory and Ephemeral Storage resources required by proxy injected into destination pod (see `proxy.resources` for sub-fields)
#destinationProxyResources:
destinationController:
meshedHttp2ClientProtobuf:
keep_alive:
interval:
seconds: 10
timeout:
seconds: 3
while_idle: true
livenessProbe:
timeoutSeconds: 1
readinessProbe:
timeoutSeconds: 1
# debug configuration
debugContainer:
image:
# -- Docker image for the debug container
name: cr.l5d.io/linkerd/debug
# -- Pull policy for the debug container image
# @default -- imagePullPolicy
pullPolicy: ""
# -- Tag for the debug container image
# @default -- linkerdVersion
version: ""
identity:
# -- If the linkerd-identity-trust-roots ConfigMap has already been created
externalCA: false
# -- Use [Service Account token Volume projection](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection) for pod validation instead of the default token
serviceAccountTokenProjection: true
issuer:
scheme: linkerd.io/tls
# -- Amount of time to allow for clock skew within a Linkerd cluster
clockSkewAllowance: 20s
# -- Amount of time for which the Identity issuer should certify identity
issuanceLifetime: 24h0m0s
# -- Which scheme is used for the identity issuer secret format
tls:
# -- Issuer certificate (ECDSA). It must be provided during install.
crtPEM: |
# -- Key for the issuer certificate (ECDSA). It must be provided during
# install
keyPEM: |
kubeAPI: *kubeapi
livenessProbe:
timeoutSeconds: 1
readinessProbe:
timeoutSeconds: 1
# -|- CPU, Memory and Ephemeral Storage resources required by the identity controller (see `proxy.resources` for sub-fields)
#identityResources:
# -|- CPU, Memory and Ephemeral Storage resources required by proxy injected into identity pod (see `proxy.resources` for sub-fields)
#identityProxyResources:
# heartbeat configuration
# disableHeartBeat -- Set to true to not start the heartbeat cronjob
disableHeartBeat: false
# -- Config for the heartbeat cronjob
# heartbeatSchedule: "0 0 * * *"
# proxy injector configuration
proxyInjector:
# -- Timeout in seconds before the API Server cancels a request to the proxy
  # injector. If the timeout is exceeded, the webhookFailurePolicy is used.
timeoutSeconds: 10
# -- Do not create a secret resource for the proxyInjector webhook.
# If this is set to `true`, the value `proxyInjector.caBundle` must be set
  # or the CA bundle must be injected with the cert-manager CA injector using
# `proxyInjector.injectCaFrom` or `proxyInjector.injectCaFromSecret` (see below).
externalSecret: false
# -- Namespace selector used by admission webhook.
namespaceSelector:
matchExpressions:
- key: config.linkerd.io/admission-webhooks
operator: NotIn
values:
- disabled
- key: kubernetes.io/metadata.name
operator: NotIn
values:
- kube-system
- cert-manager
# -- Object selector used by admission webhook.
objectSelector:
matchExpressions:
- key: linkerd.io/control-plane-component
operator: DoesNotExist
- key: linkerd.io/cni-resource
operator: DoesNotExist
# -- Certificate for the proxy injector. If not provided and not using an external secret
# then Helm will generate one.
crtPEM: |
# -- Certificate key for the proxy injector. If not provided and not using an external secret
# then Helm will generate one.
keyPEM: |
# -- Bundle of CA certificates for proxy injector.
# If not provided nor injected with cert-manager,
# then Helm will use the certificate generated for `proxyInjector.crtPEM`.
# If `proxyInjector.externalSecret` is set to true, this value, injectCaFrom, or
# injectCaFromSecret must be set, as no certificate will be generated.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector) for more information.
caBundle: |
# -- Inject the CA bundle from a cert-manager Certificate.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-certificate-resource)
# for more information.
injectCaFrom: ""
# -- Inject the CA bundle from a Secret.
# If set, the `cert-manager.io/inject-ca-from-secret` annotation will be added to the webhook.
# The Secret must have the CA Bundle stored in the `ca.crt` key and have
# the `cert-manager.io/allow-direct-injection` annotation set to `true`.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-secret-resource)
# for more information.
injectCaFromSecret: ""
livenessProbe:
timeoutSeconds: 1
readinessProbe:
timeoutSeconds: 1
# -|- CPU, Memory and Ephemeral Storage resources required by the proxy injector (see
#`proxy.resources` for sub-fields)
#proxyInjectorResources:
#-|- CPU, Memory and Ephemeral Storage resources required by proxy injected into the proxy injector
#pod (see `proxy.resources` for sub-fields)
#proxyInjectorProxyResources:
# service profile validator configuration
profileValidator:
# -- Do not create a secret resource for the profileValidator webhook.
  # If this is set to `true`, the value `profileValidator.caBundle` must be set
  # or the CA bundle must be injected with the cert-manager CA injector using
  # `profileValidator.injectCaFrom` or `profileValidator.injectCaFromSecret` (see below).
externalSecret: false
# -- Namespace selector used by admission webhook
namespaceSelector:
matchExpressions:
- key: config.linkerd.io/admission-webhooks
operator: NotIn
values:
- disabled
# -- Certificate for the service profile validator. If not provided and not using an external secret
# then Helm will generate one.
crtPEM: |
# -- Certificate key for the service profile validator. If not provided and not using an external secret
# then Helm will generate one.
keyPEM: |
  # -- Bundle of CA certificates for the service profile validator.
# If not provided nor injected with cert-manager,
# then Helm will use the certificate generated for `profileValidator.crtPEM`.
# If `profileValidator.externalSecret` is set to true, this value, injectCaFrom, or
# injectCaFromSecret must be set, as no certificate will be generated.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector) for more information.
caBundle: |
# -- Inject the CA bundle from a cert-manager Certificate.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-certificate-resource)
# for more information.
injectCaFrom: ""
# -- Inject the CA bundle from a Secret.
# If set, the `cert-manager.io/inject-ca-from-secret` annotation will be added to the webhook.
# The Secret must have the CA Bundle stored in the `ca.crt` key and have
# the `cert-manager.io/allow-direct-injection` annotation set to `true`.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-secret-resource)
# for more information.
injectCaFromSecret: ""
# policy validator configuration
policyValidator:
# -- Do not create a secret resource for the policyValidator webhook.
# If this is set to `true`, the value `policyValidator.caBundle` must be set
  # or the CA bundle must be injected with the cert-manager CA injector using
# `policyValidator.injectCaFrom` or `policyValidator.injectCaFromSecret` (see below).
externalSecret: false
# -- Namespace selector used by admission webhook
namespaceSelector:
matchExpressions:
- key: config.linkerd.io/admission-webhooks
operator: NotIn
values:
- disabled
# -- Certificate for the policy validator. If not provided and not using an external secret
# then Helm will generate one.
crtPEM: |
# -- Certificate key for the policy validator. If not provided and not using an external secret
# then Helm will generate one.
keyPEM: |
  # -- Bundle of CA certificates for the policy validator.
# If not provided nor injected with cert-manager,
# then Helm will use the certificate generated for `policyValidator.crtPEM`.
# If `policyValidator.externalSecret` is set to true, this value, injectCaFrom, or
# injectCaFromSecret must be set, as no certificate will be generated.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector) for more information.
caBundle: |
# -- Inject the CA bundle from a cert-manager Certificate.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-certificate-resource)
# for more information.
injectCaFrom: ""
# -- Inject the CA bundle from a Secret.
# If set, the `cert-manager.io/inject-ca-from-secret` annotation will be added to the webhook.
# The Secret must have the CA Bundle stored in the `ca.crt` key and have
# the `cert-manager.io/allow-direct-injection` annotation set to `true`.
# See the cert-manager [CA Injector Docs](https://cert-manager.io/docs/concepts/ca-injector/#injecting-ca-data-from-a-secret-resource)
# for more information.
injectCaFromSecret: ""
# -- NodeSelector section, See the [K8S
# documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector)
# for more information
nodeSelector:
kubernetes.io/os: linux
# -- SP validator configuration
spValidator:
livenessProbe:
timeoutSeconds: 1
readinessProbe:
timeoutSeconds: 1
# -|- CPU, Memory and Ephemeral Storage resources required by the SP validator (see
#`proxy.resources` for sub-fields)
#spValidatorResources:
# -|- Tolerations section, See the
# [K8S documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
# for more information
#tolerations:
# -|- NodeAffinity section, See the
# [K8S documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)
# for more information
#nodeAffinity:
# -- url of external prometheus instance (used for the heartbeat)
prometheusUrl: ""
# Prometheus Operator PodMonitor configuration
podMonitor:
# -- Enables the creation of Prometheus Operator [PodMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor)
enabled: false
# -- Interval at which metrics should be scraped
scrapeInterval: 10s
  # -- Timeout after which the scrape is ended
  scrapeTimeout: 10s
  # -- Labels to apply to all PodMonitors
labels: {}
controller:
# -- Enables the creation of PodMonitor for the control-plane
enabled: true
# -- Selector to select which namespaces the Endpoints objects are discovered from
namespaceSelector: |
matchNames:
- {{ .Release.Namespace }}
- linkerd-viz
- linkerd-jaeger
serviceMirror:
# -- Enables the creation of PodMonitor for the Service Mirror component
enabled: true
proxy:
# -- Enables the creation of PodMonitor for the data-plane
enabled: true
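
As the comments above note, `identityTrustAnchorsPEM` and `identity.issuer.tls.{crtPEM,keyPEM}` have no usable defaults and must be supplied at install time. A placeholder-only sketch of those overrides (the PEM bodies are deliberately elided; generate real ECDSA certificates with your tool of choice):

```yaml
identityTrustAnchorsPEM: |
  -----BEGIN CERTIFICATE-----
  <trust anchor certificate, ECDSA>
  -----END CERTIFICATE-----
identity:
  issuer:
    tls:
      crtPEM: |
        -----BEGIN CERTIFICATE-----
        <issuer certificate>
        -----END CERTIFICATE-----
      keyPEM: |
        -----BEGIN EC PRIVATE KEY-----
        <issuer private key>
        -----END EC PRIVATE KEY-----
```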

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
OWNERS
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

@ -0,0 +1,6 @@
dependencies:
- name: partials
repository: file://../partials
version: 0.1.0
digest: sha256:8e42f9c9d4a2dc883f17f94d6044c97518ced19ad0922f47b8760e47135369ba
generated: "2021-08-17T10:42:52.610449255-05:00"

@ -0,0 +1,26 @@
annotations:
catalog.cattle.io/certified: partner
catalog.cattle.io/display-name: Linkerd CRDs
catalog.cattle.io/kube-version: '>=1.22.0-0'
catalog.cattle.io/release-name: linkerd-crds
apiVersion: v2
dependencies:
- name: partials
repository: file://./charts/partials
version: 0.1.0
description: 'Linkerd gives you observability, reliability, and security for your
microservices — with no code change required. '
home: https://linkerd.io
icon: file://assets/icons/linkerd-crds.png
keywords:
- service-mesh
kubeVersion: '>=1.22.0-0'
maintainers:
- email: cncf-linkerd-dev@lists.cncf.io
name: Linkerd authors
url: https://linkerd.io/
name: linkerd-crds
sources:
- https://github.com/linkerd/linkerd2/
type: application
version: 2024.9.1

@ -0,0 +1,71 @@
# linkerd-crds
Linkerd gives you observability, reliability, and security
for your microservices — with no code change required.
![Version: 2024.9.1](https://img.shields.io/badge/Version-2024.9.1-informational?style=flat-square)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
**Homepage:** <https://linkerd.io>
## Quickstart and documentation
You can run Linkerd on any Kubernetes cluster in a matter of seconds. See the
[Linkerd Getting Started Guide][getting-started] for how.
For more comprehensive documentation, start with the [Linkerd
docs][linkerd-docs].
## Adding Linkerd's Helm repository
```bash
# To add the repo for Linkerd edge releases:
helm repo add linkerd https://helm.linkerd.io/edge
```
## Installing the linkerd-crds chart
This installs the `linkerd-crds` chart, which only persists the CRDs that
Linkerd requires.
After installing this chart, you need then to install the
`linkerd-control-plane` chart in the same namespace, which provides all the
linkerd core control components.
```bash
helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds
```
## Get involved
* Check out Linkerd's source code at [GitHub][linkerd2].
* Join Linkerd's [user mailing list][linkerd-users], [developer mailing
list][linkerd-dev], and [announcements mailing list][linkerd-announce].
* Follow [@linkerd][twitter] on Twitter.
* Join the [Linkerd Slack][slack].
[getting-started]: https://linkerd.io/2/getting-started/
[linkerd2]: https://github.com/linkerd/linkerd2
[linkerd-announce]: https://lists.cncf.io/g/cncf-linkerd-announce
[linkerd-dev]: https://lists.cncf.io/g/cncf-linkerd-dev
[linkerd-docs]: https://linkerd.io/2/overview/
[linkerd-users]: https://lists.cncf.io/g/cncf-linkerd-users
[slack]: http://slack.linkerd.io
[twitter]: https://twitter.com/linkerd
## Requirements
Kubernetes: `>=1.22.0-0`
| Repository | Name | Version |
|------------|------|---------|
| file://../partials | partials | 0.1.0 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| enableHttpRoutes | bool | `true` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)

@ -0,0 +1,59 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionBadge" . }}
{{ template "chart.typeBadge" . }}
{{ template "chart.appVersionBadge" . }}
{{ template "chart.homepageLine" . }}
## Quickstart and documentation
You can run Linkerd on any Kubernetes cluster in a matter of seconds. See the
[Linkerd Getting Started Guide][getting-started] for how.
For more comprehensive documentation, start with the [Linkerd
docs][linkerd-docs].
## Adding Linkerd's Helm repository
```bash
# To add the repo for Linkerd edge releases:
helm repo add linkerd https://helm.linkerd.io/edge
```
## Installing the linkerd-crds chart
This installs the `linkerd-crds` chart, which only persists the CRDs that
Linkerd requires.
After installing this chart, you need then to install the
`linkerd-control-plane` chart in the same namespace, which provides all the
linkerd core control components.
```bash
helm install linkerd-crds -n linkerd --create-namespace linkerd/linkerd-crds
```
## Get involved
* Check out Linkerd's source code at [GitHub][linkerd2].
* Join Linkerd's [user mailing list][linkerd-users], [developer mailing
list][linkerd-dev], and [announcements mailing list][linkerd-announce].
* Follow [@linkerd][twitter] on Twitter.
* Join the [Linkerd Slack][slack].
[getting-started]: https://linkerd.io/2/getting-started/
[linkerd2]: https://github.com/linkerd/linkerd2
[linkerd-announce]: https://lists.cncf.io/g/cncf-linkerd-announce
[linkerd-dev]: https://lists.cncf.io/g/cncf-linkerd-dev
[linkerd-docs]: https://linkerd.io/2/overview/
[linkerd-users]: https://lists.cncf.io/g/cncf-linkerd-users
[slack]: http://slack.linkerd.io
[twitter]: https://twitter.com/linkerd
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
{{ template "helm-docs.versionFooter" . }}

@ -0,0 +1,9 @@
# Linkerd 2 CRDs Chart
Linkerd is an ultra light, ultra simple, ultra powerful service mesh. Linkerd
adds security, observability, and reliability to Kubernetes, without the
complexity.
This particular Helm chart only installs Linkerd CRDs.
Full documentation available at: https://linkerd.io/2/overview/

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

@ -0,0 +1,5 @@
apiVersion: v1
description: 'A Helm chart containing Linkerd partial templates, depended by the ''linkerd''
and ''patch'' charts. '
name: partials
version: 0.1.0

@ -0,0 +1,9 @@
# partials
A Helm chart containing Linkerd partial templates,
depended by the 'linkerd' and 'patch' charts.
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square)
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)

@ -0,0 +1,14 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionBadge" . }}
{{ template "chart.typeBadge" . }}
{{ template "chart.appVersionBadge" . }}
{{ template "chart.homepageLine" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
{{ template "helm-docs.versionFooter" . }}

@ -0,0 +1,38 @@
{{ define "linkerd.pod-affinity" -}}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: {{ default "linkerd.io/control-plane-component" .label }}
operator: In
values:
- {{ .component }}
topologyKey: topology.kubernetes.io/zone
weight: 100
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: {{ default "linkerd.io/control-plane-component" .label }}
operator: In
values:
- {{ .component }}
topologyKey: kubernetes.io/hostname
{{- end }}
{{ define "linkerd.node-affinity" -}}
nodeAffinity:
{{- toYaml .Values.nodeAffinity | trim | nindent 2 }}
{{- end }}
{{ define "linkerd.affinity" -}}
{{- if or .Values.enablePodAntiAffinity .Values.nodeAffinity -}}
affinity:
{{- end }}
{{- if .Values.enablePodAntiAffinity -}}
{{- include "linkerd.pod-affinity" . | nindent 2 }}
{{- end }}
{{- if .Values.nodeAffinity -}}
{{- include "linkerd.node-affinity" . | nindent 2 }}
{{- end }}
{{- end }}
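
`linkerd.affinity` only emits anything when `enablePodAntiAffinity` or `nodeAffinity` is set in values. A sketch of inputs that exercise both branches (the architecture constraint is purely illustrative):

```yaml
enablePodAntiAffinity: true   # renders linkerd.pod-affinity for each control-plane component
nodeAffinity:                 # rendered verbatim by linkerd.node-affinity
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
      - matchExpressions:
          - key: kubernetes.io/arch
            operator: In
            values:
              - amd64
```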

@ -0,0 +1,16 @@
{{- define "partials.proxy.capabilities" -}}
capabilities:
{{- if .Values.proxy.capabilities.add }}
add:
{{- toYaml .Values.proxy.capabilities.add | trim | nindent 4 }}
{{- end }}
{{- if .Values.proxy.capabilities.drop }}
drop:
{{- toYaml .Values.proxy.capabilities.drop | trim | nindent 4 }}
{{- end }}
{{- end -}}
{{- define "partials.proxy-init.capabilities.drop" -}}
drop:
{{ toYaml .Values.proxyInit.capabilities.drop | trim }}
{{- end -}}

@ -0,0 +1,15 @@
{{- define "partials.debug" -}}
image: {{.Values.debugContainer.image.name}}:{{.Values.debugContainer.image.version | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.debugContainer.image.pullPolicy | default .Values.imagePullPolicy}}
name: linkerd-debug
terminationMessagePolicy: FallbackToLogsOnError
# some environments require probes, so we provide some infallible ones
livenessProbe:
exec:
command:
- "true"
readinessProbe:
exec:
command:
- "true"
{{- end -}}

@ -0,0 +1,14 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Splits a comma-separated list into a list of string values.
For example "11,22,55,44" will become "11","22","55","44"
*/}}
{{- define "partials.splitStringList" -}}
{{- if gt (len (toString .)) 0 -}}
{{- $ports := toString . | splitList "," -}}
{{- $last := sub (len $ports) 1 -}}
{{- range $i,$port := $ports -}}
"{{$port}}"{{ternary "," "" (ne $i $last)}}
{{- end -}}
{{- end -}}
{{- end -}}
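For reference, a hedged sketch of a hypothetical call site and the output this helper produces; only the transformation itself is taken from the template above:

```yaml
# Hypothetical call site:
#   ports: [{{ include "partials.splitStringList" "11,22,55,44" }}]
# Rendered output:
ports: ["11","22","55","44"]
```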

View File

@ -0,0 +1,17 @@
{{- define "partials.annotations.created-by" -}}
linkerd.io/created-by: {{ .Values.cliVersion | default (printf "linkerd/helm %s" ( (.Values.image).version | default .Values.linkerdVersion)) }}
{{- end -}}
{{- define "partials.proxy.annotations" -}}
linkerd.io/proxy-version: {{.Values.proxy.image.version | default .Values.linkerdVersion}}
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
linkerd.io/trust-root-sha256: {{ .Values.identityTrustAnchorsPEM | sha256sum }}
{{- end -}}
{{/*
To add labels to the control-plane components, instead update at individual component manifests as
adding here would also update `spec.selector.matchLabels` which are immutable and would fail upgrades.
*/}}
{{- define "partials.proxy.labels" -}}
linkerd.io/proxy-{{.workloadKind}}: {{.component}}
{{- end -}}

View File

@ -0,0 +1,45 @@
{{- define "partials.network-validator" -}}
name: linkerd-network-validator
image: {{.Values.proxy.image.name}}:{{.Values.proxy.image.version | default .Values.linkerdVersion }}
imagePullPolicy: {{.Values.proxy.image.pullPolicy | default .Values.imagePullPolicy}}
{{ include "partials.resources" .Values.proxy.resources }}
{{- if or .Values.networkValidator.enableSecurityContext }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
{{- end }}
command:
- /usr/lib/linkerd/linkerd2-network-validator
args:
- --log-format
- {{ .Values.networkValidator.logFormat }}
- --log-level
- {{ .Values.networkValidator.logLevel }}
- --connect-addr
{{- if .Values.networkValidator.connectAddr }}
- {{ .Values.networkValidator.connectAddr | quote }}
{{- else if .Values.disableIPv6}}
- "1.1.1.1:20001"
{{- else }}
- "[fd00::1]:20001"
{{- end }}
- --listen-addr
{{- if .Values.networkValidator.listenAddr }}
- {{ .Values.networkValidator.listenAddr | quote }}
{{- else if .Values.disableIPv6}}
- "0.0.0.0:4140"
{{- else }}
- "[::]:4140"
{{- end }}
- --timeout
- {{ .Values.networkValidator.timeout }}
{{- end -}}

View File

@ -0,0 +1,4 @@
{{- define "linkerd.node-selector" -}}
nodeSelector:
{{- toYaml .Values.nodeSelector | trim | nindent 2 }}
{{- end -}}

View File

@ -0,0 +1,18 @@
{{- define "partials.proxy.config.annotations" -}}
{{- with .cpu }}
{{- with .request -}}
config.linkerd.io/proxy-cpu-request: {{. | quote}}
{{end}}
{{- with .limit -}}
config.linkerd.io/proxy-cpu-limit: {{. | quote}}
{{- end}}
{{- end}}
{{- with .memory }}
{{- with .request }}
config.linkerd.io/proxy-memory-request: {{. | quote}}
{{end}}
{{- with .limit -}}
config.linkerd.io/proxy-memory-limit: {{. | quote}}
{{- end}}
{{- end }}
{{- end }}
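Given a resources-style map, this partial emits the matching `config.linkerd.io/proxy-*` annotations. A hedged input/output sketch (the numbers are illustrative):

```yaml
# Map passed to "partials.proxy.config.annotations":
#   cpu:    { request: 100m, limit: "1" }
#   memory: { request: 20Mi, limit: 250Mi }
# Rendered annotations:
config.linkerd.io/proxy-cpu-request: "100m"
config.linkerd.io/proxy-cpu-limit: "1"
config.linkerd.io/proxy-memory-request: "20Mi"
config.linkerd.io/proxy-memory-limit: "250Mi"
```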

View File

@ -0,0 +1,98 @@
{{- define "partials.proxy-init" -}}
args:
{{- if (.Values.proxyInit.iptablesMode | default "legacy" | eq "nft") }}
- --firewall-bin-path
- "iptables-nft"
- --firewall-save-bin-path
- "iptables-nft-save"
{{- else if not (eq .Values.proxyInit.iptablesMode "legacy") }}
{{ fail (printf "Unsupported value \"%s\" for proxyInit.iptablesMode\nValid values: [\"nft\", \"legacy\"]" .Values.proxyInit.iptablesMode) }}
{{end -}}
{{- if .Values.disableIPv6 }}
- --ipv6=false
{{- end }}
- --incoming-proxy-port
- {{.Values.proxy.ports.inbound | quote}}
- --outgoing-proxy-port
- {{.Values.proxy.ports.outbound | quote}}
- --proxy-uid
- {{.Values.proxy.uid | quote}}
{{- if ge (int .Values.proxy.gid) 0 }}
- --proxy-gid
- {{.Values.proxy.gid | quote}}
{{- end }}
- --inbound-ports-to-ignore
- "{{.Values.proxy.ports.control}},{{.Values.proxy.ports.admin}}{{ternary (printf ",%s" (.Values.proxyInit.ignoreInboundPorts | toString)) "" (not (empty .Values.proxyInit.ignoreInboundPorts)) }}"
{{- if .Values.proxyInit.ignoreOutboundPorts }}
- --outbound-ports-to-ignore
- {{.Values.proxyInit.ignoreOutboundPorts | quote}}
{{- end }}
{{- if .Values.proxyInit.closeWaitTimeoutSecs }}
- --timeout-close-wait-secs
- {{ .Values.proxyInit.closeWaitTimeoutSecs | quote}}
{{- end }}
{{- if .Values.proxyInit.logFormat }}
- --log-format
- {{ .Values.proxyInit.logFormat }}
{{- end }}
{{- if .Values.proxyInit.logLevel }}
- --log-level
- {{ .Values.proxyInit.logLevel }}
{{- end }}
{{- if .Values.proxyInit.skipSubnets }}
- --subnets-to-ignore
- {{ .Values.proxyInit.skipSubnets | quote }}
{{- end }}
image: {{.Values.proxyInit.image.name}}:{{.Values.proxyInit.image.version}}
imagePullPolicy: {{.Values.proxyInit.image.pullPolicy | default .Values.imagePullPolicy}}
name: linkerd-init
{{ include "partials.resources" .Values.proxy.resources }}
securityContext:
{{- if or .Values.proxyInit.closeWaitTimeoutSecs .Values.proxyInit.privileged }}
allowPrivilegeEscalation: true
{{- else }}
allowPrivilegeEscalation: false
{{- end }}
capabilities:
add:
- NET_ADMIN
- NET_RAW
{{- if .Values.proxyInit.capabilities -}}
{{- if .Values.proxyInit.capabilities.add }}
{{- toYaml .Values.proxyInit.capabilities.add | trim | nindent 4 }}
{{- end }}
{{- if .Values.proxyInit.capabilities.drop -}}
{{- include "partials.proxy-init.capabilities.drop" . | nindent 4 -}}
{{- end }}
{{- end }}
{{- if or .Values.proxyInit.closeWaitTimeoutSecs .Values.proxyInit.privileged }}
privileged: true
{{- else }}
privileged: false
{{- end }}
{{- if .Values.proxyInit.runAsRoot }}
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
{{- else }}
runAsNonRoot: true
runAsUser: {{ .Values.proxyInit.runAsUser | int | eq 0 | ternary 65534 .Values.proxyInit.runAsUser }}
runAsGroup: {{ .Values.proxyInit.runAsGroup | int | eq 0 | ternary 65534 .Values.proxyInit.runAsGroup }}
{{- end }}
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
terminationMessagePolicy: FallbackToLogsOnError
{{- if or (not .Values.cniEnabled) .Values.proxyInit.saMountPath }}
volumeMounts:
{{- end -}}
{{- if not .Values.cniEnabled }}
- mountPath: {{.Values.proxyInit.xtMountPath.mountPath}}
name: {{.Values.proxyInit.xtMountPath.name}}
{{- end -}}
{{- if .Values.proxyInit.saMountPath }}
- mountPath: {{.Values.proxyInit.saMountPath.mountPath}}
name: {{.Values.proxyInit.saMountPath.name}}
readOnly: {{.Values.proxyInit.saMountPath.readOnly}}
{{- end -}}
{{- end -}}
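The firewall flags at the top of this partial are driven by `proxyInit.iptablesMode`; any value other than `nft` or `legacy` fails the render. A hedged sketch of the leading arguments produced for `nft` mode (the port numbers are the usual proxy defaults and are an assumption here):

```yaml
# Illustrative values:
#   proxyInit:
#     iptablesMode: nft
# Leading args rendered for the linkerd-init container:
args:
  - --firewall-bin-path
  - "iptables-nft"
  - --firewall-save-bin-path
  - "iptables-nft-save"
  - --incoming-proxy-port
  - "4143"    # .Values.proxy.ports.inbound (assumed default)
  - --outgoing-proxy-port
  - "4140"    # .Values.proxy.ports.outbound (assumed default)
  # ...remaining args continue as defined in the partial above
```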

View File

@ -0,0 +1,267 @@
{{ define "partials.proxy" -}}
{{ if and .Values.proxy.nativeSidecar .Values.proxy.waitBeforeExitSeconds }}
{{ fail "proxy.nativeSidecar and waitBeforeExitSeconds cannot be used simultaneously" }}
{{- end }}
{{- if not (has .Values.proxy.logHTTPHeaders (list "insecure" "off" "")) }}
{{- fail "logHTTPHeaders must be one of: insecure | off" }}
{{- end }}
{{- $trustDomain := (.Values.identityTrustDomain | default .Values.clusterDomain) -}}
env:
- name: _pod_name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: _pod_ns
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: _pod_nodeName
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.proxy.cores }}
- name: LINKERD2_PROXY_CORES
value: {{.Values.proxy.cores | quote}}
{{- end }}
{{ if .Values.proxy.requireIdentityOnInboundPorts -}}
- name: LINKERD2_PROXY_INBOUND_PORTS_REQUIRE_IDENTITY
value: {{.Values.proxy.requireIdentityOnInboundPorts | quote}}
{{ end -}}
{{ if .Values.proxy.requireTLSOnInboundPorts -}}
- name: LINKERD2_PROXY_INBOUND_PORTS_REQUIRE_TLS
value: {{.Values.proxy.requireTLSOnInboundPorts | quote}}
{{ end -}}
- name: LINKERD2_PROXY_SHUTDOWN_ENDPOINT_ENABLED
value: {{.Values.proxy.enableShutdownEndpoint | quote}}
- name: LINKERD2_PROXY_LOG
value: "{{.Values.proxy.logLevel}}{{ if not (eq .Values.proxy.logHTTPHeaders "insecure") }},[{headers}]=off,[{request}]=off{{ end }}"
- name: LINKERD2_PROXY_LOG_FORMAT
value: {{.Values.proxy.logFormat | quote}}
- name: LINKERD2_PROXY_DESTINATION_SVC_ADDR
value: {{ternary "localhost.:8086" (printf "linkerd-dst-headless.%s.svc.%s.:8086" .Release.Namespace .Values.clusterDomain) (eq (toString .Values.proxy.component) "linkerd-destination")}}
- name: LINKERD2_PROXY_DESTINATION_PROFILE_NETWORKS
value: {{.Values.clusterNetworks | quote}}
- name: LINKERD2_PROXY_POLICY_SVC_ADDR
value: {{ternary "localhost.:8090" (printf "linkerd-policy.%s.svc.%s.:8090" .Release.Namespace .Values.clusterDomain) (eq (toString .Values.proxy.component) "linkerd-destination")}}
- name: LINKERD2_PROXY_POLICY_WORKLOAD
value: |
{"ns":"$(_pod_ns)", "pod":"$(_pod_name)"}
- name: LINKERD2_PROXY_INBOUND_DEFAULT_POLICY
value: {{.Values.proxy.defaultInboundPolicy}}
- name: LINKERD2_PROXY_POLICY_CLUSTER_NETWORKS
value: {{.Values.clusterNetworks | quote}}
- name: LINKERD2_PROXY_CONTROL_STREAM_INITIAL_TIMEOUT
value: {{((.Values.proxy.control).streams).initialTimeout | default "" | quote}}
- name: LINKERD2_PROXY_CONTROL_STREAM_IDLE_TIMEOUT
value: {{((.Values.proxy.control).streams).idleTimeout | default "" | quote}}
- name: LINKERD2_PROXY_CONTROL_STREAM_LIFETIME
value: {{((.Values.proxy.control).streams).lifetime | default "" | quote}}
{{ if .Values.proxy.inboundConnectTimeout -}}
- name: LINKERD2_PROXY_INBOUND_CONNECT_TIMEOUT
value: {{.Values.proxy.inboundConnectTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.outboundConnectTimeout -}}
- name: LINKERD2_PROXY_OUTBOUND_CONNECT_TIMEOUT
value: {{.Values.proxy.outboundConnectTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.outboundDiscoveryCacheUnusedTimeout -}}
- name: LINKERD2_PROXY_OUTBOUND_DISCOVERY_IDLE_TIMEOUT
value: {{.Values.proxy.outboundDiscoveryCacheUnusedTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.inboundDiscoveryCacheUnusedTimeout -}}
- name: LINKERD2_PROXY_INBOUND_DISCOVERY_IDLE_TIMEOUT
value: {{.Values.proxy.inboundDiscoveryCacheUnusedTimeout | quote}}
{{ end -}}
{{ if .Values.proxy.disableOutboundProtocolDetectTimeout -}}
- name: LINKERD2_PROXY_OUTBOUND_DETECT_TIMEOUT
value: "365d"
{{ end -}}
{{ if .Values.proxy.disableInboundProtocolDetectTimeout -}}
- name: LINKERD2_PROXY_INBOUND_DETECT_TIMEOUT
value: "365d"
{{ end -}}
- name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR
value: "{{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:{{.Values.proxy.ports.control}}"
- name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR
value: "{{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:{{.Values.proxy.ports.admin}}"
{{- /* Deprecated, superseded by LINKERD2_PROXY_OUTBOUND_LISTEN_ADDRS since proxy's v2.228.0 (deployed since edge-24.4.5) */}}
- name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR
value: "127.0.0.1:{{.Values.proxy.ports.outbound}}"
- name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDRS
value: "127.0.0.1:{{.Values.proxy.ports.outbound}}{{ if not .Values.disableIPv6}},[::1]:{{.Values.proxy.ports.outbound}}{{ end }}"
- name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR
value: "{{ if .Values.disableIPv6 }}0.0.0.0{{ else }}[::]{{ end }}:{{.Values.proxy.ports.inbound}}"
- name: LINKERD2_PROXY_INBOUND_IPS
valueFrom:
fieldRef:
fieldPath: status.podIPs
- name: LINKERD2_PROXY_INBOUND_PORTS
value: {{ .Values.proxy.podInboundPorts | quote }}
{{ if .Values.proxy.isGateway -}}
- name: LINKERD2_PROXY_INBOUND_GATEWAY_SUFFIXES
value: {{printf "svc.%s." .Values.clusterDomain}}
{{ end -}}
{{ if .Values.proxy.isIngress -}}
- name: LINKERD2_PROXY_INGRESS_MODE
value: "true"
{{ end -}}
- name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES
{{- $internalDomain := printf "svc.%s." .Values.clusterDomain }}
value: {{ternary "." $internalDomain .Values.proxy.enableExternalProfiles}}
- name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE
value: 10000ms
- name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE
value: 10000ms
{{- /* Configure inbound and outbound parameters, e.g. for HTTP/2 servers. */}}
{{ range $proxyK, $proxyV := (dict "inbound" .Values.proxy.inbound "outbound" .Values.proxy.outbound) -}}
{{ range $scopeK, $scopeV := $proxyV -}}
{{ range $protoK, $protoV := $scopeV -}}
{{ range $paramK, $paramV := $protoV -}}
- name: LINKERD2_PROXY_{{snakecase $proxyK | upper}}_{{snakecase $scopeK | upper}}_{{snakecase $protoK | upper}}_{{snakecase $paramK | upper}}
value: {{ quote $paramV }}
{{ end -}}
{{ end -}}
{{ end -}}
{{ end -}}
{{ if .Values.proxy.opaquePorts -}}
- name: LINKERD2_PROXY_INBOUND_PORTS_DISABLE_PROTOCOL_DETECTION
value: {{.Values.proxy.opaquePorts | quote}}
{{ end -}}
- name: LINKERD2_PROXY_DESTINATION_CONTEXT
value: |
{"ns":"$(_pod_ns)", "nodeName":"$(_pod_nodeName)", "pod":"$(_pod_name)"}
- name: _pod_sa
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: _l5d_ns
value: {{.Release.Namespace}}
- name: _l5d_trustdomain
value: {{$trustDomain}}
- name: LINKERD2_PROXY_IDENTITY_DIR
value: /var/run/linkerd/identity/end-entity
- name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS
{{- /*
Pods in the `linkerd` namespace are not injected by the proxy injector and instead obtain
the trust anchor bundle from the `linkerd-identity-trust-roots` configmap. This should not
be used in other contexts.
*/}}
{{- if .Values.proxy.loadTrustBundleFromConfigMap }}
valueFrom:
configMapKeyRef:
name: linkerd-identity-trust-roots
key: ca-bundle.crt
{{ else }}
value: |
{{- required "Please provide the identity trust anchors" .Values.identityTrustAnchorsPEM | trim | nindent 4 }}
{{ end -}}
- name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE
{{- if .Values.identity.serviceAccountTokenProjection }}
value: /var/run/secrets/tokens/linkerd-identity-token
{{ else }}
value: /var/run/secrets/kubernetes.io/serviceaccount/token
{{ end -}}
- name: LINKERD2_PROXY_IDENTITY_SVC_ADDR
value: {{ternary "localhost.:8080" (printf "linkerd-identity-headless.%s.svc.%s.:8080" .Release.Namespace .Values.clusterDomain) (eq (toString .Values.proxy.component) "linkerd-identity")}}
- name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME
value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
- name: LINKERD2_PROXY_IDENTITY_SVC_NAME
value: linkerd-identity.{{.Release.Namespace}}.serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
- name: LINKERD2_PROXY_DESTINATION_SVC_NAME
value: linkerd-destination.{{.Release.Namespace}}.serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
- name: LINKERD2_PROXY_POLICY_SVC_NAME
value: linkerd-destination.{{.Release.Namespace}}.serviceaccount.identity.{{.Release.Namespace}}.{{$trustDomain}}
{{ if .Values.proxy.accessLog -}}
- name: LINKERD2_PROXY_ACCESS_LOG
value: {{.Values.proxy.accessLog | quote}}
{{ end -}}
{{ if .Values.proxy.shutdownGracePeriod -}}
- name: LINKERD2_PROXY_SHUTDOWN_GRACE_PERIOD
value: {{.Values.proxy.shutdownGracePeriod | quote}}
{{ end -}}
{{ if .Values.proxy.additionalEnv -}}
{{ toYaml .Values.proxy.additionalEnv }}
{{ end -}}
{{ if .Values.proxy.experimentalEnv -}}
{{ toYaml .Values.proxy.experimentalEnv }}
{{ end -}}
image: {{.Values.proxy.image.name}}:{{.Values.proxy.image.version | default .Values.linkerdVersion}}
imagePullPolicy: {{.Values.proxy.image.pullPolicy | default .Values.imagePullPolicy}}
livenessProbe:
httpGet:
path: /live
port: {{.Values.proxy.ports.admin}}
initialDelaySeconds: {{.Values.proxy.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{.Values.proxy.livenessProbe.timeoutSeconds }}
name: linkerd-proxy
ports:
- containerPort: {{.Values.proxy.ports.inbound}}
name: linkerd-proxy
- containerPort: {{.Values.proxy.ports.admin}}
name: linkerd-admin
readinessProbe:
httpGet:
path: /ready
port: {{.Values.proxy.ports.admin}}
initialDelaySeconds: {{.Values.proxy.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{.Values.proxy.readinessProbe.timeoutSeconds }}
{{- if and .Values.proxy.nativeSidecar .Values.proxy.await }}
startupProbe:
httpGet:
path: /ready
port: {{.Values.proxy.ports.admin}}
initialDelaySeconds: {{.Values.proxy.startupProbe.initialDelaySeconds}}
periodSeconds: {{.Values.proxy.startupProbe.periodSeconds}}
failureThreshold: {{.Values.proxy.startupProbe.failureThreshold}}
{{- end }}
{{- if .Values.proxy.resources }}
{{ include "partials.resources" .Values.proxy.resources }}
{{- end }}
securityContext:
allowPrivilegeEscalation: false
{{- if .Values.proxy.capabilities -}}
{{- include "partials.proxy.capabilities" . | nindent 2 -}}
{{- end }}
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{.Values.proxy.uid}}
{{- if ge (int .Values.proxy.gid) 0 }}
runAsGroup: {{.Values.proxy.gid}}
{{- end }}
seccompProfile:
type: RuntimeDefault
terminationMessagePolicy: FallbackToLogsOnError
{{- if and (not .Values.proxy.nativeSidecar) (or .Values.proxy.await .Values.proxy.waitBeforeExitSeconds) }}
lifecycle:
{{- if .Values.proxy.await }}
postStart:
exec:
command:
- /usr/lib/linkerd/linkerd-await
- --timeout=2m
- --port={{.Values.proxy.ports.admin}}
{{- end }}
{{- if .Values.proxy.waitBeforeExitSeconds }}
preStop:
exec:
command:
- /bin/sleep
- {{.Values.proxy.waitBeforeExitSeconds | quote}}
{{- end }}
{{- end }}
volumeMounts:
- mountPath: /var/run/linkerd/identity/end-entity
name: linkerd-identity-end-entity
{{- if .Values.identity.serviceAccountTokenProjection }}
- mountPath: /var/run/secrets/tokens
name: linkerd-identity-token
{{- end }}
{{- if .Values.proxy.saMountPath }}
- mountPath: {{.Values.proxy.saMountPath.mountPath}}
name: {{.Values.proxy.saMountPath.name}}
readOnly: {{.Values.proxy.saMountPath.readOnly}}
{{- end -}}
{{- if .Values.proxy.nativeSidecar }}
restartPolicy: Always
{{- end -}}
{{- end }}
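One non-obvious piece above is the nested range over `proxy.inbound`/`proxy.outbound`: every leaf value is flattened into an upper-snake-case `LINKERD2_PROXY_*` environment variable. A hedged sketch with a hypothetical parameter name, showing only the mechanical transformation:

```yaml
# Hypothetical values:
#   proxy:
#     inbound:
#       server:
#         detect:
#           timeout: 10s
# Rendered environment variable (each path segment snake-cased and upper-cased):
- name: LINKERD2_PROXY_INBOUND_SERVER_DETECT_TIMEOUT
  value: "10s"
```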

View File

@ -0,0 +1,6 @@
{{- define "partials.image-pull-secrets"}}
{{- if . }}
imagePullSecrets:
{{ toYaml . | indent 2 }}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,28 @@
{{- define "partials.resources" -}}
{{- $ephemeralStorage := index . "ephemeral-storage" -}}
resources:
{{- if or (.cpu).limit (.memory).limit ($ephemeralStorage).limit }}
limits:
{{- with (.cpu).limit }}
cpu: {{. | quote}}
{{- end }}
{{- with (.memory).limit }}
memory: {{. | quote}}
{{- end }}
{{- with ($ephemeralStorage).limit }}
ephemeral-storage: {{. | quote}}
{{- end }}
{{- end }}
{{- if or (.cpu).request (.memory).request ($ephemeralStorage).request }}
requests:
{{- with (.cpu).request }}
cpu: {{. | quote}}
{{- end }}
{{- with (.memory).request }}
memory: {{. | quote}}
{{- end }}
{{- with ($ephemeralStorage).request }}
ephemeral-storage: {{. | quote}}
{{- end }}
{{- end }}
{{- end }}
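For reference, a hedged sketch of what `partials.resources` renders when passed a map with cpu, memory, and ephemeral-storage entries (numbers are illustrative):

```yaml
# Input map:
#   cpu:               { request: 100m }
#   memory:            { request: 20Mi, limit: 250Mi }
#   ephemeral-storage: { limit: 1Gi }
# Rendered block:
resources:
  limits:
    memory: "250Mi"
    ephemeral-storage: "1Gi"
  requests:
    cpu: "100m"
    memory: "20Mi"
```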

View File

@ -0,0 +1,4 @@
{{- define "linkerd.tolerations" -}}
tolerations:
{{ toYaml .Values.tolerations | trim | indent 2 }}
{{- end -}}

View File

@ -0,0 +1,5 @@
{{ define "partials.linkerd.trace" -}}
{{ if .Values.controlPlaneTracing -}}
- -trace-collector=collector.{{.Values.controlPlaneTracingNamespace}}.svc.{{.Values.clusterDomain}}:55678
{{ end -}}
{{- end }}

View File

@ -0,0 +1,19 @@
{{- define "linkerd.webhook.validation" -}}
{{- if and (.injectCaFrom) (.injectCaFromSecret) -}}
{{- fail "injectCaFrom and injectCaFromSecret cannot both be set" -}}
{{- end -}}
{{- if and (or (.injectCaFrom) (.injectCaFromSecret)) (.caBundle) -}}
{{- fail "injectCaFrom or injectCaFromSecret cannot be set if providing a caBundle" -}}
{{- end -}}
{{- if and (.externalSecret) (empty .caBundle) (empty .injectCaFrom) (empty .injectCaFromSecret) -}}
{{- fail "if externalSecret is set, then caBundle, injectCaFrom, or injectCaFromSecret must be set" -}}
{{- end }}
{{- if and (or .injectCaFrom .injectCaFromSecret .caBundle) (not .externalSecret) -}}
{{- fail "if caBundle, injectCaFrom, or injectCaFromSecret is set, then externalSecret must be set" -}}
{{- end -}}
{{- end -}}
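Put differently, the checks accept either chart-managed certificates (no CA fields, no `externalSecret`) or an externally managed secret plus exactly one CA source. A hedged sketch of two webhook value combinations that pass validation (the cert reference is illustrative):

```yaml
# Combination 1: chart-managed certificates
externalSecret: false

# Combination 2: externally managed secret with an injected CA bundle
externalSecret: true
injectCaFrom: linkerd/my-webhook-cert   # illustrative namespace/Certificate reference
```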

View File

@ -0,0 +1,20 @@
{{ define "partials.proxy.volumes.identity" -}}
emptyDir:
medium: Memory
name: linkerd-identity-end-entity
{{- end -}}
{{ define "partials.proxyInit.volumes.xtables" -}}
emptyDir: {}
name: {{ .Values.proxyInit.xtMountPath.name }}
{{- end -}}
{{- define "partials.proxy.volumes.service-account-token" -}}
name: linkerd-identity-token
projected:
sources:
- serviceAccountToken:
path: linkerd-identity-token
expirationSeconds: 86400 {{- /* # 24 hours */}}
audience: identity.l5d.io
{{- end -}}

View File

@ -0,0 +1,6 @@
The linkerd-crds chart was successfully installed 🎉
To complete the linkerd core installation, please now proceed to install the
linkerd-control-plane chart in the {{ .Release.Namespace }} namespace.
Looking for more? Visit https://linkerd.io/2/getting-started/

View File

@ -0,0 +1,99 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: authorizationpolicies.policy.linkerd.io
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
linkerd.io/control-plane-ns: {{.Release.Namespace}}
spec:
group: policy.linkerd.io
scope: Namespaced
names:
kind: AuthorizationPolicy
plural: authorizationpolicies
singular: authorizationpolicy
shortNames: [authzpolicy]
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
description: >-
Authorizes clients to communicate with Linkerd-proxied server
resources.
type: object
required: [targetRef, requiredAuthenticationRefs]
properties:
targetRef:
description: >-
TargetRef references a resource to which the authorization
policy applies.
type: object
required: [kind, name]
# Modified from the gateway API.
# Copyright 2020 The Kubernetes Authors
properties:
group:
description: >-
Group is the group of the referent. When empty, the
Kubernetes core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: >-
Kind is the kind of the referent.
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
requiredAuthenticationRefs:
description: >-
RequiredAuthenticationRefs enumerates a set of required
authentications. ALL authentications must be satisfied for
the authorization to apply. If any of the referred objects
cannot be found, the authorization will be ignored.
type: array
items:
type: object
required: [kind, name]
properties:
group:
description: >-
Group is the group of the referent. When empty, the
Kubernetes core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: >-
Kind is the kind of the referent.
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: >-
Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: >-
Namespace is the namespace of the referent. When unspecified,
this authentication refers to the local namespace.
maxLength: 253
type: string
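A hedged example of a resource this schema admits, tying a Server to a MeshTLSAuthentication (all names are illustrative, not taken from this diff):

```yaml
apiVersion: policy.linkerd.io/v1alpha1
kind: AuthorizationPolicy
metadata:
  name: web-authz          # illustrative
  namespace: emojivoto     # illustrative
spec:
  targetRef:
    group: policy.linkerd.io
    kind: Server
    name: web-http         # a Server defined in the same namespace
  requiredAuthenticationRefs:
    - group: policy.linkerd.io
      kind: MeshTLSAuthentication
      name: web-clients    # illustrative
```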

File diff suppressed because it is too large

View File

@ -0,0 +1,87 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: meshtlsauthentications.policy.linkerd.io
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
linkerd.io/control-plane-ns: {{.Release.Namespace}}
spec:
group: policy.linkerd.io
scope: Namespaced
names:
kind: MeshTLSAuthentication
plural: meshtlsauthentications
singular: meshtlsauthentication
shortNames: [meshtlsauthn]
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
description: >-
MeshTLSAuthentication defines a list of authenticated client IDs
to be referenced by an `AuthorizationPolicy`. If a client
connection has the mutually-authenticated identity that matches
ANY of the provided identities, the connection is
considered authenticated.
type: object
oneOf:
- required: [identities]
- required: [identityRefs]
properties:
identities:
description: >-
Authorizes clients with the provided proxy identity strings
(as provided via MTLS)
The `*` prefix can be used to match all identities in
a domain. An identity string of `*` indicates that
all authenticated clients are authorized.
type: array
minItems: 1
items:
type: string
identityRefs:
type: array
minItems: 1
items:
type: object
required:
- kind
properties:
group:
description: >-
Group is the group of the referent. When empty, the
Kubernetes core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: >-
Kind is the kind of the referent.
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: >-
Name is the name of the referent. When unspecified,
this refers to all resources of the specified Group
and Kind in the specified namespace.
maxLength: 253
minLength: 1
type: string
namespace:
description: >-
Name is the name of the referent. When unspecified,
this authentication refers to the local namespace.
maxLength: 253
type: string
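A hedged example instance of this CRD, authorizing a single mesh identity (the identity string follows the usual `sa.namespace.serviceaccount.identity.<linkerd-ns>.<trust-domain>` shape and is illustrative):

```yaml
apiVersion: policy.linkerd.io/v1alpha1
kind: MeshTLSAuthentication
metadata:
  name: web-clients        # illustrative
  namespace: emojivoto     # illustrative
spec:
  identities:
    - web.emojivoto.serviceaccount.identity.linkerd.cluster.local   # illustrative
```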

View File

@ -0,0 +1,53 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: networkauthentications.policy.linkerd.io
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
linkerd.io/control-plane-ns: {{.Release.Namespace}}
spec:
group: policy.linkerd.io
scope: Namespaced
names:
kind: NetworkAuthentication
plural: networkauthentications
singular: networkauthentication
shortNames: [netauthn, networkauthn]
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
description: >-
NetworkAuthentication defines a list of authenticated client
networks to be referenced by an `AuthorizationPolicy`. If a
client connection originates from ANY of the provided
networks, the connection is considered authenticated.
type: object
required: [networks]
properties:
networks:
type: array
items:
type: object
required: [cidr]
properties:
cidr:
description: >-
The CIDR of the network to be authorized.
type: string
except:
description: >-
A list of IP networks/addresses not to be included in
the above `cidr`.
type: array
items:
type: string
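A hedged example instance of this CRD, authenticating clients by source network (CIDRs are illustrative):

```yaml
apiVersion: policy.linkerd.io/v1alpha1
kind: NetworkAuthentication
metadata:
  name: cluster-network    # illustrative
  namespace: emojivoto     # illustrative
spec:
  networks:
    - cidr: 10.0.0.0/8     # illustrative pod network
      except:
        - 10.42.0.17/32    # illustrative excluded address
    - cidr: 192.168.0.0/16
```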

View File

@ -0,0 +1,266 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: serverauthorizations.policy.linkerd.io
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
linkerd.io/control-plane-ns: {{.Release.Namespace}}
spec:
group: policy.linkerd.io
scope: Namespaced
names:
kind: ServerAuthorization
plural: serverauthorizations
singular: serverauthorization
shortNames: [saz, serverauthz, srvauthz]
versions:
- name: v1alpha1
served: true
storage: false
deprecated: true
deprecationWarning: "policy.linkerd.io/v1alpha1 ServerAuthorization is deprecated; use policy.linkerd.io/v1beta1 ServerAuthorization"
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
description: >-
Authorizes clients to communicate with Linkerd-proxied servers.
type: object
required: [server, client]
properties:
server:
description: >-
Identifies servers in the same namespace for which this
authorization applies.
Only one of `name` or `selector` may be specified.
type: object
oneOf:
- required: [name]
- required: [selector]
properties:
name:
description: References a `Server` instance by name
type: string
pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
selector:
description: >-
A label query over servers on which this authorization applies.
type: object
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
client:
description: Describes clients authorized to access a server.
type: object
properties:
networks:
description: >-
Limits the client IP addresses to which this
authorization applies. If unset, the server chooses a
default (typically, all IPs or the cluster's pod
network).
type: array
items:
type: object
required: [cidr]
properties:
cidr:
type: string
except:
type: array
items:
type: string
unauthenticated:
description: >-
Authorizes unauthenticated clients to access a server.
type: boolean
meshTLS:
type: object
properties:
unauthenticatedTLS:
type: boolean
description: >-
Indicates that no client identity is required for
communication.
This is mostly important for the identity
controller, which must terminate TLS connections
from clients that do not yet have a certificate.
identities:
description: >-
Authorizes clients with the provided proxy identity
strings (as provided via MTLS)
The `*` prefix can be used to match all identities in
a domain. An identity string of `*` indicates that
all authenticated clients are authorized.
type: array
items:
type: string
pattern: '^(\*|[a-z0-9]([-a-z0-9]*[a-z0-9])?)(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'
serviceAccounts:
description: >-
Authorizes clients with the provided proxy identity
service accounts (as provided via MTLS)
type: array
items:
type: object
required: [name]
properties:
name:
description: The ServiceAccount's name.
type: string
pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
namespace:
description: >-
The ServiceAccount's namespace. If unset, the
authorization's namespace is used.
type: string
pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
- name: v1beta1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
description: >-
Authorizes clients to communicate with Linkerd-proxied servers.
type: object
required: [server, client]
properties:
server:
description: >-
Identifies servers in the same namespace for which this
authorization applies.
Only one of `name` or `selector` may be specified.
type: object
oneOf:
- required: [name]
- required: [selector]
properties:
name:
description: References a `Server` instance by name
type: string
pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
selector:
description: >-
A label query over servers on which this authorization applies.
type: object
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
client:
description: Describes clients authorized to access a server.
type: object
properties:
networks:
description: >-
Limits the client IP addresses to which this
authorization applies. If unset, the server chooses a
default (typically, all IPs or the cluster's pod
network).
type: array
items:
type: object
required: [cidr]
properties:
cidr:
type: string
except:
type: array
items:
type: string
unauthenticated:
description: >-
Authorizes unauthenticated clients to access a server.
type: boolean
meshTLS:
type: object
properties:
unauthenticatedTLS:
type: boolean
description: >-
Indicates that no client identity is required for
communication.
This is mostly important for the identity
controller, which must terminate TLS connections
from clients that do not yet have a certificate.
identities:
description: >-
Authorizes clients with the provided proxy identity
strings (as provided via MTLS)
The `*` prefix can be used to match all identities in
a domain. An identity string of `*` indicates that
all authenticated clients are authorized.
type: array
items:
type: string
pattern: '^(\*|[a-z0-9]([-a-z0-9]*[a-z0-9])?)(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'
serviceAccounts:
description: >-
Authorizes clients with the provided proxy identity
service accounts (as provided via MTLS)
type: array
items:
type: object
required: [name]
properties:
name:
description: The ServiceAccount's name.
type: string
pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
namespace:
description: >-
The ServiceAccount's namespace. If unset, the
authorization's namespace is used.
type: string
pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
additionalPrinterColumns:
- name: Server
type: string
description: The server that this grants access to
jsonPath: .spec.server.name
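A hedged example of a v1beta1 ServerAuthorization this schema admits, allowing unauthenticated traffic from a given network to a named Server (all names and CIDRs are illustrative):

```yaml
apiVersion: policy.linkerd.io/v1beta1
kind: ServerAuthorization
metadata:
  name: web-public         # illustrative
  namespace: emojivoto     # illustrative
spec:
  server:
    name: web-http         # references a Server by name
  client:
    unauthenticated: true
    networks:
      - cidr: 10.0.0.0/8   # illustrative
```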

View File

@ -0,0 +1,319 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: servers.policy.linkerd.io
annotations:
{{ include "partials.annotations.created-by" . }}
labels:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
linkerd.io/control-plane-ns: {{.Release.Namespace}}
spec:
group: policy.linkerd.io
names:
kind: Server
plural: servers
singular: server
shortNames: [srv]
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: false
deprecated: true
deprecationWarning: "policy.linkerd.io/v1alpha1 Server is deprecated; use policy.linkerd.io/v1beta1 Server"
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
type: object
required:
- podSelector
- port
properties:
podSelector:
type: object
description: >-
Selects pods in the same namespace.
oneOf:
- required: [matchExpressions]
- required: [matchLabels]
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
port:
description: >-
A port name or number. Must exist in a pod spec.
x-kubernetes-int-or-string: true
proxyProtocol:
description: >-
Configures protocol discovery for inbound connections.
Supersedes the `config.linkerd.io/opaque-ports` annotation.
type: string
default: unknown
- name: v1beta1
served: true
storage: false
deprecated: true
deprecationWarning: "policy.linkerd.io/v1alpha1 Server is deprecated; use policy.linkerd.io/v1beta3 Server"
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
type: object
required:
- podSelector
- port
properties:
podSelector:
type: object
description: >-
Selects pods in the same namespace.
The result of matchLabels and matchExpressions are ANDed.
Selects all if empty.
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
port:
description: >-
A port name or number. Must exist in a pod spec.
x-kubernetes-int-or-string: true
proxyProtocol:
description: >-
Configures protocol discovery for inbound connections.
Supersedes the `config.linkerd.io/opaque-ports` annotation.
type: string
default: unknown
additionalPrinterColumns:
- name: Port
type: string
description: The port the server is listening on
jsonPath: .spec.port
- name: Protocol
type: string
description: The protocol of the server
jsonPath: .spec.proxyProtocol
- name: v1beta2
served: true
storage: false
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
type: object
required:
- port
oneOf:
- required: [podSelector]
- required: [externalWorkloadSelector]
properties:
podSelector:
type: object
description: >-
Selects pods in the same namespace.
The result of matchLabels and matchExpressions are ANDed.
Selects all if empty.
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
externalWorkloadSelector:
type: object
description: >-
Selects ExternalWorkloads in the same namespace.
The result of matchLabels and matchExpressions are ANDed.
Selects all if empty.
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
port:
description: >-
A port name or number. Must exist in a pod spec.
x-kubernetes-int-or-string: true
proxyProtocol:
description: >-
Configures protocol discovery for inbound connections.
Supersedes the `config.linkerd.io/opaque-ports` annotation.
type: string
default: unknown
additionalPrinterColumns:
- name: Port
type: string
description: The port the server is listening on
jsonPath: .spec.port
- name: Protocol
type: string
description: The protocol of the server
jsonPath: .spec.proxyProtocol
- name: v1beta3
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [spec]
properties:
spec:
type: object
required:
- port
oneOf:
- required: [podSelector]
- required: [externalWorkloadSelector]
properties:
accessPolicy:
type: string
default: deny
description: >-
Default access policy to apply when the traffic doesn't match any of the policy rules.
podSelector:
type: object
description: >-
Selects pods in the same namespace.
The result of matchLabels and matchExpressions are ANDed.
Selects all if empty.
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
externalWorkloadSelector:
type: object
description: >-
Selects ExternalWorkloads in the same namespace.
The result of matchLabels and matchExpressions are ANDed.
Selects all if empty.
properties:
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
required: [key, operator]
properties:
key:
type: string
operator:
type: string
enum: [In, NotIn, Exists, DoesNotExist]
values:
type: array
items:
type: string
port:
description: >-
A port name or number. Must exist in a pod spec.
x-kubernetes-int-or-string: true
proxyProtocol:
description: >-
Configures protocol discovery for inbound connections.
Supersedes the `config.linkerd.io/opaque-ports` annotation.
type: string
default: unknown
additionalPrinterColumns:
- name: Port
type: string
description: The port the server is listening on
jsonPath: .spec.port
- name: Protocol
type: string
description: The protocol of the server
jsonPath: .spec.proxyProtocol
- name: Access Policy
type: string
description: The default access policy applied when the traffic doesn't match any of the policy rules
jsonPath: .spec.accessPolicy
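A hedged example of a v1beta3 Server this schema admits (labels, port, and protocol are illustrative):

```yaml
apiVersion: policy.linkerd.io/v1beta3
kind: Server
metadata:
  name: web-http           # illustrative
  namespace: emojivoto     # illustrative
spec:
  podSelector:
    matchLabels:
      app: web-svc         # illustrative pod label
  port: http               # port name or number that exists in the pod spec
  proxyProtocol: HTTP/1    # illustrative; defaults to "unknown" when omitted
  accessPolicy: deny       # the v1beta3 default when omitted
```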

Some files were not shown because too many files have changed in this diff